net/enic: support max descriptors allowed by adapter

Newer VIC adapters report the maximum number of supported RX and TX
descriptors in their configuration. Use these values as the maximums.

Signed-off-by: John Daley <johndale@cisco.com>
Reviewed-by: Hyong Youb Kim <hyonkim@cisco.com>
commit 22572e84fb
parent 9ca71a5b27
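The overall pattern of the change is simple: prefer the ring sizes reported by the adapter, and fall back to the legacy 4096-descriptor limit when the VIC does not report them (or when 64B CQ descriptors are not in use), then clamp and align the configured count. The following is a minimal sketch of that selection and clamping logic, not the driver's code; the helper name clamp_desc_count() and the standalone constants are illustrative, only the RTE_MIN/RTE_MAX/alignment pattern mirrors the patch.

#include <stdbool.h>
#include <stdint.h>
#include <rte_common.h>		/* RTE_MIN, RTE_MAX */

/* Illustrative constants; the driver defines its own equivalents in enic_res.h. */
#define MIN_DESCS		64
#define LEGACY_MAX_DESCS	4096		/* 1400 series VICs and earlier */
#define ALIGN_DESCS_MASK	0xffffffe0	/* rings hold a multiple of 32 descriptors */

/*
 * Hypothetical helper: clamp a requested descriptor count to what the
 * adapter supports. 'reported_max' is the value read from the vNIC
 * config (0 when the firmware does not provide it); 'cq64' indicates
 * that 64B CQ descriptors are in use.
 */
static uint32_t
clamp_desc_count(uint32_t requested, uint32_t reported_max, bool cq64)
{
	uint32_t max = (cq64 && reported_max) ? reported_max : LEGACY_MAX_DESCS;
	uint32_t n = RTE_MIN(max, RTE_MAX((uint32_t)MIN_DESCS, requested));

	return n & ALIGN_DESCS_MASK;	/* round down to a multiple of 32 */
}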
@@ -67,7 +67,8 @@ struct cq_enet_rq_desc_64 {
 	uint16_t vlan;
 	uint16_t checksum_fcoe;
 	uint8_t flags;
-	uint8_t unused[48];
+	uint8_t fetch_idx_flags;
+	uint8_t unused[47];
 	uint8_t type_color;
 };

@@ -92,6 +93,9 @@ struct cq_enet_rq_desc_64 {
 #define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
 #define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
 	((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FETCH_IDX_BITS 2
+#define CQ_ENET_RQ_DESC_FETCH_IDX_MASK \
+	((1 << CQ_ENET_RQ_DESC_FETCH_IDX_BITS) - 1)
 #define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
 #define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)

@@ -26,6 +26,7 @@ int enic_get_vnic_config(struct enic *enic)
 	struct vnic_enet_config *c = &enic->config;
 	int err;
 	uint64_t sizes;
+	uint32_t max_rq_descs, max_wq_descs;

 	err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr);
 	if (err) {
@@ -57,6 +58,8 @@ int enic_get_vnic_config(struct enic *enic)
 	GET_CONFIG(loop_tag);
 	GET_CONFIG(num_arfs);
 	GET_CONFIG(max_pkt_size);
+	GET_CONFIG(max_rq_ring);
+	GET_CONFIG(max_wq_ring);

 	/* max packet size is only defined in newer VIC firmware
 	 * and will be 0 for legacy firmware and VICs
@@ -101,20 +104,29 @@ int enic_get_vnic_config(struct enic *enic)
 		((enic->filter_actions & FILTER_ACTION_COUNTER_FLAG) ?
 		 "count " : ""));

-	c->wq_desc_count = RTE_MIN((uint32_t)ENIC_MAX_WQ_DESCS,
+	/* The max size of RQ and WQ rings are specified in 1500 series VICs and
+	 * beyond. If they are not specified by the VIC or if 64B CQ descriptors
+	 * are not being used, the max number of descriptors is 4096.
+	 */
+	max_wq_descs = (enic->cq64_request && c->max_wq_ring) ? c->max_wq_ring :
+		       ENIC_LEGACY_MAX_WQ_DESCS;
+	c->wq_desc_count = RTE_MIN(max_wq_descs,
 			RTE_MAX((uint32_t)ENIC_MIN_WQ_DESCS, c->wq_desc_count));
 	c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */

-	c->rq_desc_count = RTE_MIN((uint32_t)ENIC_MAX_RQ_DESCS,
+	max_rq_descs = (enic->cq64_request && c->max_rq_ring) ? c->max_rq_ring
+		     : ENIC_LEGACY_MAX_WQ_DESCS;
+	c->rq_desc_count = RTE_MIN(max_rq_descs,
 			RTE_MAX((uint32_t)ENIC_MIN_RQ_DESCS, c->rq_desc_count));
 	c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
+	dev_debug(NULL, "Max supported VIC descriptors: WQ:%u, RQ:%u\n",
+		  max_wq_descs, max_rq_descs);

 	c->intr_timer_usec = RTE_MIN(c->intr_timer_usec,
 				     vnic_dev_get_intr_coal_timer_max(enic->vdev));

 	dev_info(enic_get_dev(enic),
 		"vNIC MAC addr " RTE_ETHER_ADDR_PRT_FMT
-		"wq/rq %d/%d mtu %d, max mtu:%d\n",
+		" wq/rq %d/%d mtu %d, max mtu:%d\n",
 		enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
 		enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
 		c->wq_desc_count, c->rq_desc_count,

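To make the clamping above concrete: a request of 5000 descriptors against a VIC that reports a max ring of 16384 passes the RTE_MIN/RTE_MAX bounds unchanged, and the final "& 0xffffffe0" mask rounds it down to 4992, the nearest multiple of 32. The small self-contained check below reproduces that arithmetic with open-coded comparisons (standing in for RTE_MIN/RTE_MAX); the values 16384 and 5000 are chosen only for illustration.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t max_rq_ring = 16384;	/* example value reported by a newer VIC */
	uint32_t requested = 5000;

	uint32_t n = requested < 64 ? 64 : requested;	/* RTE_MAX(MIN_DESCS, requested) */
	n = n < max_rq_ring ? n : max_rq_ring;		/* RTE_MIN(max, n) */
	n &= 0xffffffe0;				/* align to groups of 32 */

	assert(n == 4992);	/* 5000 rounded down to a multiple of 32 */
	return 0;
}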
@@ -12,9 +12,11 @@
 #include "vnic_rq.h"

 #define ENIC_MIN_WQ_DESCS 64
-#define ENIC_MAX_WQ_DESCS 4096
 #define ENIC_MIN_RQ_DESCS 64
-#define ENIC_MAX_RQ_DESCS 4096
+
+/* 1400 series VICs and prior all have 4K max, after that it's in the config */
+#define ENIC_LEGACY_MAX_WQ_DESCS 4096
+#define ENIC_LEGACY_MAX_RQ_DESCS 4096

 /* A descriptor ring has a multiple of 32 descriptors */
 #define ENIC_ALIGN_DESCS 32

@@ -84,6 +84,7 @@ enic_recv_pkts_common(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint8_t packet_error;
 	uint16_t ciflags;
 	uint8_t tc;
+	uint16_t rq_idx_msbs = 0;

 	max_rx--;

@@ -94,17 +95,24 @@ enic_recv_pkts_common(void *rx_queue, struct rte_mbuf **rx_pkts,

 		/* Get the cq descriptor and extract rq info from it */
 		cqd = *cqd_ptr;

 		/*
-		 * The first 16B of 64B descriptor is identical to the
-		 * 16B descriptor, except type_color. Copy type_color
-		 * from the 64B descriptor into the 16B descriptor's
-		 * field, so the code below can assume the 16B
-		 * descriptor format.
+		 * The first 16B of a 64B descriptor is identical to a 16B
+		 * descriptor except for the type_color and fetch index. Extract
+		 * fetch index and copy the type_color from the 64B to where it
+		 * would be in a 16B descriptor so subsequent code can run
+		 * without further conditionals.
 		 */
-		if (use_64b_desc)
+		if (use_64b_desc) {
+			rq_idx_msbs = (((volatile struct cq_enet_rq_desc_64 *)
+				       cqd_ptr)->fetch_idx_flags
+				       & CQ_ENET_RQ_DESC_FETCH_IDX_MASK)
+				       << CQ_DESC_COMP_NDX_BITS;
 			cqd.type_color = tc;
+		}
 		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
-		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;
+		rq_idx = rq_idx_msbs +
+			 (cqd.completed_index & CQ_DESC_COMP_NDX_MASK);

 		rq = &enic->rq[rq_num];
 		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;
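The fetch-index handling above can be read as widening the completion index: CQ_DESC_COMP_NDX_MASK only covers the low bits of the RQ index, so for rings larger than the legacy limit the extra fetch-index bits from the 64B descriptor supply the missing most-significant bits. The sketch below reassembles the index the same way, but with stand-in bit widths; the real widths come from cq_desc.h and cq_enet_desc.h and may differ.

#include <stdint.h>

/* Stand-in widths for illustration; the driver takes these from its headers. */
#define COMP_NDX_BITS	12
#define COMP_NDX_MASK	((1u << COMP_NDX_BITS) - 1)
#define FETCH_IDX_BITS	2
#define FETCH_IDX_MASK	((1u << FETCH_IDX_BITS) - 1)

/*
 * Rebuild the full RQ index from the low bits in completed_index and the
 * extra bits carried in the 64B descriptor's fetch_idx_flags field.
 */
static uint16_t
rq_index(uint16_t completed_index, uint8_t fetch_idx_flags)
{
	uint16_t msbs = (fetch_idx_flags & FETCH_IDX_MASK) << COMP_NDX_BITS;

	return msbs + (completed_index & COMP_NDX_MASK);
}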
@@ -362,14 +370,19 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq,
 				     uint16_t completed_index)
 {
 	struct rte_mbuf *buf;
-	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
+	struct rte_mbuf *m, *free[ENIC_LEGACY_MAX_WQ_DESCS];
 	unsigned int nb_to_free, nb_free = 0, i;
 	struct rte_mempool *pool;
 	unsigned int tail_idx;
 	unsigned int desc_count = wq->ring.desc_count;

-	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
-		     + 1;
+	/*
+	 * On 1500 Series VIC and beyond, greater than ENIC_LEGACY_MAX_WQ_DESCS
+	 * may be attempted to be freed. Cap it at ENIC_LEGACY_MAX_WQ_DESCS.
+	 */
+	nb_to_free = RTE_MIN(enic_ring_sub(desc_count, wq->tail_idx,
+			     completed_index) + 1,
+			     (uint32_t)ENIC_LEGACY_MAX_WQ_DESCS);
 	tail_idx = wq->tail_idx;
 	pool = wq->bufs[tail_idx]->pool;
 	for (i = 0; i < nb_to_free; i++) {
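The cap on nb_to_free exists because the free[] scratch array keeps its legacy ENIC_LEGACY_MAX_WQ_DESCS size even when the ring itself is larger; completions beyond that bound are left for a later pass. The self-contained sketch below shows the capped ring-distance computation with a hypothetical ring_sub() standing in for enic_ring_sub() (the real implementation may differ); the ring size and indices are example values only.

#include <assert.h>
#include <stdint.h>

#define LEGACY_MAX_WQ_DESCS 4096u

/* Hypothetical stand-in for enic_ring_sub(): distance from i0 to i1 on a ring. */
static uint32_t ring_sub(uint32_t desc_count, uint32_t i0, uint32_t i1)
{
	return (i1 + desc_count - i0) % desc_count;
}

int main(void)
{
	uint32_t desc_count = 16384;	/* ring larger than the legacy limit */
	uint32_t tail_idx = 100, completed_index = 8291;

	uint32_t nb_to_free = ring_sub(desc_count, tail_idx, completed_index) + 1;
	if (nb_to_free > LEGACY_MAX_WQ_DESCS)
		nb_to_free = LEGACY_MAX_WQ_DESCS;	/* bounded by the free[] array */

	assert(nb_to_free == 4096);	/* 8192 completions, capped at 4096 */
	return 0;
}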
@@ -381,7 +394,7 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq,
 		}

 		if (likely(m->pool == pool)) {
-			RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
+			RTE_ASSERT(nb_free < ENIC_LEGACY_MAX_WQ_DESCS);
 			free[nb_free++] = m;
 		} else {
 			rte_mempool_put_bulk(pool, (void *)free, nb_free);