kni: remove continuous memory restriction
Use the mbuf buf_addr and buf_physaddr fields for address translation. Since each mbuf address is now calculated separately, the restriction that all mbufs must come from a single physically contiguous memory chunk is no longer needed.

The content of the mbuf-related FIFOs changes: rx_q and alloc_q now carry physical addresses of mbufs. tx_q and free_q are unchanged; they still carry virtual addresses of mbufs.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
commit 8451269e6d
parent f503b8cff5
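For orientation before the diff: below is a minimal, self-contained userspace sketch (not part of the patch; the struct and helper names are invented for illustration) of the per-mbuf address arithmetic this commit switches to. Each mbuf records both its buffer's virtual address (buf_addr) and physical address (buf_physaddr), so VA/PA conversion uses a per-buffer offset instead of one global mempool offset; rx_q and alloc_q then carry physical addresses while tx_q and free_q keep carrying virtual ones.

/* Illustration only: fake_mbuf stands in for struct rte_kni_mbuf / rte_mbuf. */
#include <stdint.h>
#include <stdio.h>

struct fake_mbuf {
    void *buf_addr;        /* virtual address of the data buffer    */
    uint64_t buf_physaddr; /* "physical" address of the same buffer */
};

/* virtual -> physical, same arithmetic as the patch's va2pa() helpers */
static void *fake_va2pa(const struct fake_mbuf *m, void *va)
{
    return (void *)((uintptr_t)va -
            ((uintptr_t)m->buf_addr - (uintptr_t)m->buf_physaddr));
}

/* physical -> virtual, same arithmetic as the patch's pa2va() helper */
static void *fake_pa2va(const struct fake_mbuf *m, void *pa)
{
    return (void *)((uintptr_t)pa +
            (uintptr_t)m->buf_addr - (uintptr_t)m->buf_physaddr);
}

int main(void)
{
    static uint8_t buffer[2048];

    /* Pretend the buffer's physical address sits 0x10000 below its VA. */
    struct fake_mbuf m = {
        .buf_addr = buffer,
        .buf_physaddr = (uintptr_t)buffer - 0x10000,
    };

    /* Userspace enqueues the PA (rx_q/alloc_q); the peer recovers the VA. */
    void *pa = fake_va2pa(&m, &m);
    void *va = fake_pa2va(&m, pa);

    printf("mbuf va=%p -> pa=%p -> va=%p\n", (void *)&m, pa, va);
    return 0;
}

Because the offset is taken from each mbuf individually, no assumption about a single contiguous mempool memory chunk is needed, which is exactly the restriction the patch removes in rte_kni_alloc().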
@@ -115,7 +115,8 @@ struct rte_kni_fifo {
  */
 struct rte_kni_mbuf {
     void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
-    char pad0[10];
+    uint64_t buf_physaddr;
+    char pad0[2];
     uint16_t data_off;      /**< Start address of data in segment buffer. */
     char pad1[2];
     uint8_t nb_segs;        /**< Number of segments. */
@@ -460,9 +460,6 @@ kni_ioctl_create(struct net *net,
     kni->sync_va = dev_info.sync_va;
     kni->sync_kva = phys_to_virt(dev_info.sync_phys);
 
-    kni->mbuf_kva = phys_to_virt(dev_info.mbuf_phys);
-    kni->mbuf_va = dev_info.mbuf_va;
-
 #ifdef RTE_KNI_VHOST
     kni->vhost_queue = NULL;
     kni->vq_status = BE_STOP;
@@ -481,9 +478,6 @@ kni_ioctl_create(struct net *net,
         (unsigned long long) dev_info.req_phys, kni->req_q);
     KNI_PRINT("resp_phys: 0x%016llx, resp_q addr: 0x%p\n",
         (unsigned long long) dev_info.resp_phys, kni->resp_q);
-    KNI_PRINT("mbuf_phys: 0x%016llx, mbuf_kva: 0x%p\n",
-        (unsigned long long) dev_info.mbuf_phys, kni->mbuf_kva);
-    KNI_PRINT("mbuf_va: 0x%p\n", dev_info.mbuf_va);
     KNI_PRINT("mbuf_size: %u\n", kni->mbuf_size);
 
     KNI_DBG("PCI: %02x:%02x.%02x %04x:%04x\n",
@@ -61,6 +61,44 @@ static int kni_net_process_request(struct kni_dev *kni,
 /* kni rx function pointer, with default to normal rx */
 static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
 
+/* physical address to kernel virtual address */
+static void *
+pa2kva(void *pa)
+{
+    return phys_to_virt((unsigned long)pa);
+}
+
+/* physical address to virtual address */
+static void *
+pa2va(void *pa, struct rte_kni_mbuf *m)
+{
+    void *va;
+
+    va = (void *)((unsigned long)pa +
+            (unsigned long)m->buf_addr -
+            (unsigned long)m->buf_physaddr);
+    return va;
+}
+
+/* mbuf data kernel virtual address from mbuf kernel virtual address */
+static void *
+kva2data_kva(struct rte_kni_mbuf *m)
+{
+    return phys_to_virt(m->buf_physaddr + m->data_off);
+}
+
+/* virtual address to physical address */
+static void *
+va2pa(void *va, struct rte_kni_mbuf *m)
+{
+    void *pa;
+
+    pa = (void *)((unsigned long)va -
+            ((unsigned long)m->buf_addr -
+             (unsigned long)m->buf_physaddr));
+    return pa;
+}
+
 /*
  * Open and close
  */
@@ -125,8 +163,9 @@ kni_net_rx_normal(struct kni_dev *kni)
     uint32_t len;
     unsigned i, num_rx, num_fq;
     struct rte_kni_mbuf *kva;
-    struct rte_kni_mbuf *va[MBUF_BURST_SZ];
-    void * data_kva;
+    void *pa[MBUF_BURST_SZ];
+    void *va[MBUF_BURST_SZ];
+    void *data_kva;
 
     struct sk_buff *skb;
     struct net_device *dev = kni->net_dev;
@@ -142,17 +181,16 @@ kni_net_rx_normal(struct kni_dev *kni)
     num_rx = min(num_fq, (unsigned)MBUF_BURST_SZ);
 
     /* Burst dequeue from rx_q */
-    num_rx = kni_fifo_get(kni->rx_q, (void **)va, num_rx);
+    num_rx = kni_fifo_get(kni->rx_q, pa, num_rx);
     if (num_rx == 0)
         return;
 
     /* Transfer received packets to netif */
     for (i = 0; i < num_rx; i++) {
-        kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
+        kva = pa2kva(pa[i]);
         len = kva->pkt_len;
-
-        data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
-                + kni->mbuf_kva;
+        data_kva = kva2data_kva(kva);
+        va[i] = pa2va(pa[i], kva);
 
         skb = dev_alloc_skb(len + 2);
         if (!skb) {
@@ -178,9 +216,8 @@ kni_net_rx_normal(struct kni_dev *kni)
             if (!kva->next)
                 break;
 
-            kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
-            data_kva = kva->buf_addr + kva->data_off
-                    - kni->mbuf_va + kni->mbuf_kva;
+            kva = pa2kva(va2pa(kva->next, kva));
+            data_kva = kva2data_kva(kva);
         }
     }
 
@@ -197,7 +234,7 @@ kni_net_rx_normal(struct kni_dev *kni)
     }
 
     /* Burst enqueue mbufs into free_q */
-    ret = kni_fifo_put(kni->free_q, (void **)va, num_rx);
+    ret = kni_fifo_put(kni->free_q, va, num_rx);
     if (ret != num_rx)
         /* Failing should not happen */
         KNI_ERR("Fail to enqueue entries into free_q\n");
@@ -213,11 +250,13 @@ kni_net_rx_lo_fifo(struct kni_dev *kni)
     uint32_t len;
     unsigned i, num, num_rq, num_tq, num_aq, num_fq;
     struct rte_kni_mbuf *kva;
-    struct rte_kni_mbuf *va[MBUF_BURST_SZ];
+    void *pa[MBUF_BURST_SZ];
+    void *va[MBUF_BURST_SZ];
     void * data_kva;
 
     struct rte_kni_mbuf *alloc_kva;
-    struct rte_kni_mbuf *alloc_va[MBUF_BURST_SZ];
+    void *alloc_pa[MBUF_BURST_SZ];
+    void *alloc_va[MBUF_BURST_SZ];
     void *alloc_data_kva;
 
     /* Get the number of entries in rx_q */
@@ -243,26 +282,25 @@ kni_net_rx_lo_fifo(struct kni_dev *kni)
         return;
 
     /* Burst dequeue from rx_q */
-    ret = kni_fifo_get(kni->rx_q, (void **)va, num);
+    ret = kni_fifo_get(kni->rx_q, pa, num);
     if (ret == 0)
         return; /* Failing should not happen */
 
     /* Dequeue entries from alloc_q */
-    ret = kni_fifo_get(kni->alloc_q, (void **)alloc_va, num);
+    ret = kni_fifo_get(kni->alloc_q, alloc_pa, num);
     if (ret) {
         num = ret;
         /* Copy mbufs */
         for (i = 0; i < num; i++) {
-            kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
+            kva = pa2kva(pa[i]);
             len = kva->pkt_len;
-            data_kva = kva->buf_addr + kva->data_off -
-                    kni->mbuf_va + kni->mbuf_kva;
+            data_kva = kva2data_kva(kva);
+            va[i] = pa2va(pa[i], kva);
+
+            alloc_kva = pa2kva(alloc_pa[i]);
+            alloc_data_kva = kva2data_kva(alloc_kva);
+            alloc_va[i] = pa2va(alloc_pa[i], alloc_kva);
 
-            alloc_kva = (void *)alloc_va[i] - kni->mbuf_va +
-                    kni->mbuf_kva;
-            alloc_data_kva = alloc_kva->buf_addr +
-                    alloc_kva->data_off - kni->mbuf_va +
-                    kni->mbuf_kva;
             memcpy(alloc_data_kva, data_kva, len);
             alloc_kva->pkt_len = len;
             alloc_kva->data_len = len;
@@ -272,14 +310,14 @@ kni_net_rx_lo_fifo(struct kni_dev *kni)
         }
 
         /* Burst enqueue mbufs into tx_q */
-        ret = kni_fifo_put(kni->tx_q, (void **)alloc_va, num);
+        ret = kni_fifo_put(kni->tx_q, alloc_va, num);
         if (ret != num)
             /* Failing should not happen */
             KNI_ERR("Fail to enqueue mbufs into tx_q\n");
     }
 
     /* Burst enqueue mbufs into free_q */
-    ret = kni_fifo_put(kni->free_q, (void **)va, num);
+    ret = kni_fifo_put(kni->free_q, va, num);
     if (ret != num)
         /* Failing should not happen */
         KNI_ERR("Fail to enqueue mbufs into free_q\n");
@@ -302,8 +340,9 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
     uint32_t len;
     unsigned i, num_rq, num_fq, num;
     struct rte_kni_mbuf *kva;
-    struct rte_kni_mbuf *va[MBUF_BURST_SZ];
-    void * data_kva;
+    void *pa[MBUF_BURST_SZ];
+    void *va[MBUF_BURST_SZ];
+    void *data_kva;
 
     struct sk_buff *skb;
     struct net_device *dev = kni->net_dev;
@@ -323,16 +362,16 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
         return;
 
     /* Burst dequeue mbufs from rx_q */
-    ret = kni_fifo_get(kni->rx_q, (void **)va, num);
+    ret = kni_fifo_get(kni->rx_q, pa, num);
     if (ret == 0)
         return;
 
     /* Copy mbufs to sk buffer and then call tx interface */
     for (i = 0; i < num; i++) {
-        kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
+        kva = pa2kva(pa[i]);
         len = kva->pkt_len;
-        data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
-            kni->mbuf_kva;
+        data_kva = kva2data_kva(kva);
+        va[i] = pa2va(pa[i], kva);
 
         skb = dev_alloc_skb(len + 2);
         if (skb == NULL)
@@ -370,9 +409,8 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
             if (!kva->next)
                 break;
 
-            kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
-            data_kva = kva->buf_addr + kva->data_off
-                    - kni->mbuf_va + kni->mbuf_kva;
+            kva = pa2kva(va2pa(kva->next, kva));
+            data_kva = kva2data_kva(kva);
         }
     }
 
@@ -387,7 +425,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
     }
 
     /* enqueue all the mbufs from rx_q into free_q */
-    ret = kni_fifo_put(kni->free_q, (void **)&va, num);
+    ret = kni_fifo_put(kni->free_q, va, num);
     if (ret != num)
         /* Failing should not happen */
         KNI_ERR("Fail to enqueue mbufs into free_q\n");
@@ -426,7 +464,8 @@ kni_net_tx(struct sk_buff *skb, struct net_device *dev)
     unsigned ret;
     struct kni_dev *kni = netdev_priv(dev);
     struct rte_kni_mbuf *pkt_kva = NULL;
-    struct rte_kni_mbuf *pkt_va = NULL;
+    void *pkt_pa = NULL;
+    void *pkt_va = NULL;
 
     /* save the timestamp */
 #ifdef HAVE_TRANS_START_HELPER
@@ -453,13 +492,13 @@ kni_net_tx(struct sk_buff *skb, struct net_device *dev)
     }
 
     /* dequeue a mbuf from alloc_q */
-    ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
+    ret = kni_fifo_get(kni->alloc_q, &pkt_pa, 1);
     if (likely(ret == 1)) {
         void *data_kva;
 
-        pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
-        data_kva = pkt_kva->buf_addr + pkt_kva->data_off - kni->mbuf_va
-                + kni->mbuf_kva;
+        pkt_kva = pa2kva(pkt_pa);
+        data_kva = kva2data_kva(pkt_kva);
+        pkt_va = pa2va(pkt_pa, pkt_kva);
 
         len = skb->len;
         memcpy(data_kva, skb->data, len);
@@ -471,7 +510,7 @@ kni_net_tx(struct sk_buff *skb, struct net_device *dev)
     pkt_kva->data_len = len;
 
     /* enqueue mbuf into tx_q */
-    ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
+    ret = kni_fifo_put(kni->tx_q, &pkt_va, 1);
     if (unlikely(ret != 1)) {
         /* Failing should not happen */
         KNI_ERR("Fail to enqueue mbuf into tx_q\n");
@@ -416,14 +416,6 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
     dev_info.sync_va = mz->addr;
     dev_info.sync_phys = mz->phys_addr;
 
-
-    /* MBUF mempool */
-    /* KNI currently requires to have only one memory chunk */
-    if (pktmbuf_pool->nb_mem_chunks != 1)
-        goto kni_fail;
-
-    dev_info.mbuf_va = STAILQ_FIRST(&pktmbuf_pool->mem_list)->addr;
-    dev_info.mbuf_phys = STAILQ_FIRST(&pktmbuf_pool->mem_list)->phys_addr;
     ctx->pktmbuf_pool = pktmbuf_pool;
     ctx->group_id = conf->group_id;
     ctx->slot_id = slot->id;
@@ -459,6 +451,20 @@ kni_free_fifo(struct rte_kni_fifo *fifo)
     } while (ret);
 }
 
+static void
+kni_free_fifo_phy(struct rte_kni_fifo *fifo)
+{
+    void *mbuf_phys;
+    int ret;
+
+    do {
+        ret = kni_fifo_get(fifo, &mbuf_phys, 1);
+        /*
+         * TODO: free mbufs
+         */
+    } while (ret);
+}
+
 int
 rte_kni_release(struct rte_kni *kni)
 {
@@ -476,8 +482,8 @@ rte_kni_release(struct rte_kni *kni)
 
     /* mbufs in all fifo should be released, except request/response */
     kni_free_fifo(kni->tx_q);
-    kni_free_fifo(kni->rx_q);
-    kni_free_fifo(kni->alloc_q);
+    kni_free_fifo_phy(kni->rx_q);
+    kni_free_fifo_phy(kni->alloc_q);
     kni_free_fifo(kni->free_q);
 
     slot_id = kni->slot_id;
@@ -543,10 +549,25 @@ rte_kni_handle_request(struct rte_kni *kni)
     return 0;
 }
 
+static void *
+va2pa(struct rte_mbuf *m)
+{
+    return (void *)((unsigned long)m -
+            ((unsigned long)m->buf_addr -
+             (unsigned long)m->buf_physaddr));
+}
+
 unsigned
 rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
 {
-    unsigned ret = kni_fifo_put(kni->rx_q, (void **)mbufs, num);
+    void *phy_mbufs[num];
+    unsigned int ret;
+    unsigned int i;
+
+    for (i = 0; i < num; i++)
+        phy_mbufs[i] = va2pa(mbufs[i]);
+
+    ret = kni_fifo_put(kni->rx_q, phy_mbufs, num);
 
     /* Get mbufs from free_q and then free them */
     kni_free_mbufs(kni);
@@ -584,6 +605,7 @@ kni_allocate_mbufs(struct rte_kni *kni)
 {
     int i, ret;
     struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
+    void *phys[MAX_MBUF_BURST_NUM];
 
     RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
              offsetof(struct rte_kni_mbuf, pool));
@@ -613,13 +635,14 @@ kni_allocate_mbufs(struct rte_kni *kni)
             RTE_LOG(ERR, KNI, "Out of memory\n");
             break;
         }
+        phys[i] = va2pa(pkts[i]);
     }
 
     /* No pkt mbuf alocated */
     if (i <= 0)
         return;
 
-    ret = kni_fifo_put(kni->alloc_q, (void **)pkts, i);
+    ret = kni_fifo_put(kni->alloc_q, phys, i);
 
     /* Check if any mbufs not put into alloc_q, and then free them */
     if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {