/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_H_
#define RTE_PMD_MLX5_RXTX_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>
#include <rte_io.h>

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 5

struct mlx5_rxq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t ipackets; /**< Total of successfully received packets. */
	uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

struct mlx5_txq_stats {
	unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint64_t opackets; /**< Total of successfully sent packets. */
	uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
	uint64_t oerrors; /**< Total number of failed transmitted packets. */
};

struct priv;

/* Compressed CQE context. */
struct rxq_zip {
	uint16_t ai; /* Array index. */
	uint16_t ca; /* Current array index. */
	uint16_t na; /* Next array index. */
	uint16_t cq_ci; /* The next CQE. */
	uint32_t cqe_cnt; /* Number of CQEs. */
};

/* Multi-Packet RQ buffer header. */
struct mlx5_mprq_buf {
	struct rte_mempool *mp;
	rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
	uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
} __rte_cache_aligned;

/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr) ((ptr) + 1)

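/*
 * Illustrative sketch (not part of the driver API): how a caller might
 * compute the address of a given stride inside a Multi-Packet RQ buffer.
 * It assumes strides are laid out back to back right after the
 * mlx5_mprq_buf header, with the stride size expressed as a log2 value
 * (as in mlx5_rxq_data.strd_sz_n); the helper name is hypothetical.
 */
static inline void *
mlx5_mprq_stride_addr_example(struct mlx5_mprq_buf *buf,
			      unsigned int strd_sz_n, uint16_t strd_idx)
{
	const uint32_t strd_sz = 1U << strd_sz_n;

	/* The first stride starts right after the buffer header. */
	return RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), strd_idx * strd_sz);
}
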
/* RX queue descriptor. */
struct mlx5_rxq_data {
	unsigned int csum:1; /* Enable checksum offloading. */
	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
	unsigned int crc_present:1; /* CRC must be subtracted. */
	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
	unsigned int elts_n:4; /* Log 2 of Mbufs. */
	unsigned int rss_hash:1; /* RSS hash result is enabled. */
	unsigned int mark:1; /* Marked flow available on the queue. */
	unsigned int strd_num_n:5; /* Log 2 of the number of strides. */
	unsigned int strd_sz_n:4; /* Log 2 of stride size. */
	unsigned int strd_shift_en:1; /* Enable 2-byte shift on a stride. */
	unsigned int :6; /* Remaining bits. */
	volatile uint32_t *rq_db;
	volatile uint32_t *cq_db;
	uint16_t port_id;
	uint16_t rq_ci;
	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
	uint16_t rq_pi;
	uint16_t cq_ci;
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
	volatile void *wqes;
	volatile struct mlx5_cqe(*cqes)[];
	struct rxq_zip zip; /* Compressed context. */
	RTE_STD_C11
	union {
		struct rte_mbuf *(*elts)[];
		struct mlx5_mprq_buf *(*mprq_bufs)[];
	};
	struct rte_mempool *mp;
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
	struct mlx5_rxq_stats stats;
	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
	void *cq_uar; /* CQ user access region. */
	uint32_t cqn; /* CQ number. */
	uint8_t cq_arm_sn; /* CQ arm seq number. */
#ifndef RTE_ARCH_64
	rte_spinlock_t *uar_lock_cq;
	/* CQ (UAR) access lock required for 32-bit implementations. */
#endif
	uint32_t tunnel; /* Tunnel information. */
} __rte_cache_aligned;

/* Verbs Rx queue elements. */
struct mlx5_rxq_ibv {
	LIST_ENTRY(mlx5_rxq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_wq *wq; /* Work Queue. */
	struct ibv_comp_channel *channel;
};

/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
	struct priv *priv; /* Back pointer to private data. */
	struct mlx5_rxq_data rxq; /* Data path structure. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int irq:1; /* Whether IRQ is enabled. */
	uint16_t idx; /* Queue index. */
	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
};

/* Indirection table. */
struct mlx5_ind_table_ibv {
	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
	uint32_t queues_n; /**< Number of queues in the list. */
	uint16_t queues[]; /**< Queue list. */
};

/* Hash Rx queue. */
struct mlx5_hrxq {
	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
	struct ibv_qp *qp; /* Verbs queue pair. */
	uint64_t hash_fields; /* Verbs Hash fields. */
	uint32_t rss_key_len; /* Hash key length in bytes. */
	uint8_t rss_key[]; /* Hash key. */
};

/* TX queue descriptor. */
__extension__
struct mlx5_txq_data {
	uint16_t elts_head; /* Current counter in (*elts)[]. */
	uint16_t elts_tail; /* Counter of first element awaiting completion. */
	uint16_t elts_comp; /* Counter since last completion request. */
	uint16_t mpw_comp; /* WQ index since last completion request. */
	uint16_t cq_ci; /* Consumer index for completion queue. */
#ifndef NDEBUG
	uint16_t cq_pi; /* Producer index for completion queue. */
#endif
	uint16_t wqe_ci; /* Consumer index for work queue. */
	uint16_t wqe_pi; /* Producer index for work queue. */
	uint16_t elts_n:4; /* (*elts)[] length (in log2). */
	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
	uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
	uint16_t tunnel_en:1;
	/* When set TX offload for tunneled packets are supported. */
	uint16_t swp_en:1; /* Whether SW parser is enabled. */
	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
	uint32_t qp_num_8s; /* QP number shifted by 8. */
	uint64_t offloads; /* Offloads for Tx Queue. */
	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
	volatile void *wqes; /* Work queue (use volatile to write into). */
	volatile uint32_t *qp_db; /* Work queue doorbell. */
	volatile uint32_t *cq_db; /* Completion queue doorbell. */
	volatile void *bf_reg; /* Blueflame register remapped. */
	struct rte_mbuf *(*elts)[]; /* TX elements. */
	struct mlx5_txq_stats stats; /* TX queue counters. */
#ifndef RTE_ARCH_64
	rte_spinlock_t *uar_lock;
	/* UAR access lock required for 32-bit implementations. */
#endif
} __rte_cache_aligned;

/* Verbs Tx queue elements. */
struct mlx5_txq_ibv {
	LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
	struct ibv_cq *cq; /* Completion Queue. */
	struct ibv_qp *qp; /* Queue Pair. */
};

/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
	rte_atomic32_t refcnt; /* Reference counter. */
	unsigned int socket; /* CPU socket ID for allocations. */
	unsigned int max_inline_data; /* Max inline data. */
	unsigned int max_tso_header; /* Max TSO header size. */
	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
	struct priv *priv; /* Back pointer to private data. */
	struct mlx5_txq_data txq; /* Data path structure. */
	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
	volatile void *bf_reg_orig; /* Blueflame register from verbs. */
	uint16_t idx; /* Queue index. */
};

/* mlx5_rxq.c */

extern uint8_t rss_hash_default_key[];

int mlx5_check_mprq_support(struct rte_eth_dev *dev);
int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
int mlx5_mprq_enabled(struct rte_eth_dev *dev);
int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
void mlx5_rx_queue_release(void *dpdk_rxq);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev);
void mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev);
int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_rxconf *conf,
				   struct rte_mempool *mp);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int rxq_alloc_mprq_buf(struct mlx5_rxq_ctrl *rxq_ctrl);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
						  const uint16_t *queues,
						  uint32_t queues_n);
int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_ibv *ind_tbl);
int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev);
void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
				const uint8_t *rss_key, uint32_t rss_key_len,
				uint64_t hash_fields,
				const uint16_t *queues, uint32_t queues_n,
				int tunnel __rte_unused);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
				const uint8_t *rss_key, uint32_t rss_key_len,
				uint64_t hash_fields,
				const uint16_t *queues, uint32_t queues_n);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);

/* mlx5_txq.c */

int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
			unsigned int socket, const struct rte_eth_txconf *conf);
void mlx5_tx_queue_release(void *dpdk_txq);
int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd);
struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv);
int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
				   uint16_t desc, unsigned int socket,
				   const struct rte_eth_txconf *conf);
struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_verify(struct rte_eth_dev *dev);
void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);

/* mlx5_rxtx.c */

extern uint32_t mlx5_ptype_table[];
extern uint8_t mlx5_cksum_table[];
extern uint8_t mlx5_swp_types_table[];

void mlx5_set_ptype_table(void);
void mlx5_set_cksum_table(void);
void mlx5_set_swp_types_table(void);
uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
		       uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
				  uint16_t pkts_n);
uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
			    uint16_t pkts_n);
uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
			  uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);

/* Vectorized version of mlx5_rxtx.c */

int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
int mlx5_check_vec_tx_support(struct rte_eth_dev *dev);
int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
			       uint16_t pkts_n);
uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);
uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
			   uint16_t pkts_n);

/* mlx5_mr.c */

void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);

/**
 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
 * 64bit architectures.
 *
 * @param val
 *   value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 * @param lock
 *   Address of the lock to use for that UAR access.
 */
static __rte_always_inline void
__mlx5_uar_write64_relaxed(uint64_t val, volatile void *addr,
			   rte_spinlock_t *lock __rte_unused)
{
#ifdef RTE_ARCH_64
	rte_write64_relaxed(val, addr);
#else /* !RTE_ARCH_64 */
	rte_spinlock_lock(lock);
	rte_write32_relaxed(val, addr);
	rte_io_wmb();
	rte_write32_relaxed(val >> 32,
			    (volatile void *)((volatile char *)addr + 4));
	rte_spinlock_unlock(lock);
#endif
}

/**
 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
 * 64bit architectures while guaranteeing the order of execution with the
 * code being executed.
 *
 * @param val
 *   value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 * @param lock
 *   Address of the lock to use for that UAR access.
 */
static __rte_always_inline void
__mlx5_uar_write64(uint64_t val, volatile void *addr, rte_spinlock_t *lock)
{
	rte_io_wmb();
	__mlx5_uar_write64_relaxed(val, addr, lock);
}

/* Assist macros, used instead of directly calling the functions they wrap. */
#ifdef RTE_ARCH_64
#define mlx5_uar_write64_relaxed(val, dst, lock) \
		__mlx5_uar_write64_relaxed(val, dst, NULL)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
#else
#define mlx5_uar_write64_relaxed(val, dst, lock) \
		__mlx5_uar_write64_relaxed(val, dst, lock)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
#endif

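/*
 * Illustrative sketch (not part of the driver API): typical use of the
 * assist macros above to copy the first 8 bytes of a posted WQE to the
 * BlueFlame register. The doorbell-record update and barrier placement
 * shown here follow the usual doorbell pattern but are an assumption, not
 * a verbatim copy of the driver's doorbell helper; the function name is
 * hypothetical. Note that txq->uar_lock only exists on 32-bit builds; on
 * 64-bit builds the macro drops the lock argument before compilation.
 */
static __rte_always_inline void
mlx5_tx_bf_copy_example(struct mlx5_txq_data *txq, volatile uint64_t *wqe)
{
	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);

	/* Make the WQE visible to the device before ringing the doorbell. */
	rte_io_wmb();
	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
	/* Ensure ordering between doorbell record and BlueFlame copy. */
	rte_wmb();
	mlx5_uar_write64_relaxed(*wqe, dst, txq->uar_lock);
}
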
#ifndef NDEBUG

/**
 * Verify or set magic value in CQE.
 *
 * @param cqe
 *   Pointer to CQE.
 *
 * @return
 *   0 the first time.
 */
static inline int
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
	static const uint8_t magic[] = "seen";
	volatile uint8_t (*buf)[sizeof(cqe->rsvd1)] = &cqe->rsvd1;
	int ret = 1;
	unsigned int i;

	for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
		if (!ret || (*buf)[i] != magic[i]) {
			ret = 0;
			(*buf)[i] = magic[i];
		}
	return ret;
}

#endif /* NDEBUG */

/**
 * Check whether CQE is valid.
 *
 * @param cqe
 *   Pointer to CQE.
 * @param cqes_n
 *   Size of completion queue.
 * @param ci
 *   Consumer index.
 *
 * @return
 *   0 on success, 1 on failure.
 */
static __rte_always_inline int
check_cqe(volatile struct mlx5_cqe *cqe,
	  unsigned int cqes_n, const uint16_t ci)
{
	uint16_t idx = ci & cqes_n;
	uint8_t op_own = cqe->op_own;
	uint8_t op_owner = MLX5_CQE_OWNER(op_own);
	uint8_t op_code = MLX5_CQE_OPCODE(op_own);

	if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
		return 1; /* No CQE. */
#ifndef NDEBUG
	if ((op_code == MLX5_CQE_RESP_ERR) ||
	    (op_code == MLX5_CQE_REQ_ERR)) {
		volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
		uint8_t syndrome = err_cqe->syndrome;

		if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
		    (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
			return 0;
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR,
				"unexpected CQE error %u (0x%02x) syndrome"
				" 0x%02x",
				op_code, op_code, syndrome);
			rte_hexdump(stderr, "MLX5 Error CQE:",
				    (const void *)((uintptr_t)err_cqe),
				    sizeof(*err_cqe));
		}
		return 1;
	} else if ((op_code != MLX5_CQE_RESP_SEND) &&
		   (op_code != MLX5_CQE_REQ)) {
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)",
				op_code, op_code);
			rte_hexdump(stderr, "MLX5 CQE:",
				    (const void *)((uintptr_t)cqe),
				    sizeof(*cqe));
		}
		return 1;
	}
#endif /* NDEBUG */
	return 0;
}

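/*
 * Illustrative sketch (not part of the driver API): how check_cqe() is
 * typically used to poll a completion queue. It only reports whether a
 * fresh completion is available at the current consumer index; the helper
 * name is hypothetical and the real consumer of this pattern is
 * mlx5_tx_complete() below.
 */
static __rte_always_inline int
mlx5_tx_cqe_ready_example(struct mlx5_txq_data *txq)
{
	const unsigned int cqe_n = 1 << txq->cqe_n;
	const unsigned int cqe_cnt = cqe_n - 1;
	volatile struct mlx5_cqe *cqe = &(*txq->cqes)[txq->cq_ci & cqe_cnt];

	/* check_cqe() returns 1 when no new CQE is owned by software. */
	return !check_cqe(cqe, cqe_n, txq->cq_ci);
}
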
/**
 * Return the address of the WQE.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param ci
 *   WQE consumer index.
 *
 * @return
 *   WQE address.
 */
static inline uintptr_t *
tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
{
	ci &= ((1 << txq->wqe_n) - 1);
	return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
}

/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static __rte_always_inline void
mlx5_tx_complete(struct mlx5_txq_data *txq)
{
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int cqe_n = 1 << txq->cqe_n;
	const unsigned int cqe_cnt = cqe_n - 1;
	uint16_t elts_free = txq->elts_tail;
	uint16_t elts_tail;
	uint16_t cq_ci = txq->cq_ci;
	volatile struct mlx5_cqe *cqe = NULL;
	volatile struct mlx5_wqe_ctrl *ctrl;
	struct rte_mbuf *m, *free[elts_n];
	struct rte_mempool *pool = NULL;
	unsigned int blk_n = 0;

	cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
	if (unlikely(check_cqe(cqe, cqe_n, cq_ci)))
		return;
#ifndef NDEBUG
	if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
	    (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
		if (!check_cqe_seen(cqe)) {
			DRV_LOG(ERR, "unexpected error CQE, Tx stopped");
			rte_hexdump(stderr, "MLX5 TXQ:",
				    (const void *)((uintptr_t)txq->wqes),
				    ((1 << txq->wqe_n) *
				     MLX5_WQE_SIZE));
		}
		return;
	}
#endif /* NDEBUG */
	++cq_ci;
	txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
	ctrl = (volatile struct mlx5_wqe_ctrl *)
		tx_mlx5_wqe(txq, txq->wqe_pi);
	elts_tail = ctrl->ctrl3;
	assert((elts_tail & elts_m) < (1 << txq->wqe_n));
	/* Free buffers. */
	while (elts_free != elts_tail) {
		m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
		if (likely(m != NULL)) {
			if (likely(m->pool == pool)) {
				free[blk_n++] = m;
			} else {
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free,
							     blk_n);
				free[0] = m;
				pool = m->pool;
				blk_n = 1;
			}
		}
	}
	if (blk_n)
		rte_mempool_put_bulk(pool, (void *)free, blk_n);
#ifndef NDEBUG
	elts_free = txq->elts_tail;
	/* Poisoning. */
	while (elts_free != elts_tail) {
		memset(&(*txq->elts)[elts_free & elts_m],
		       0x66,
		       sizeof((*txq->elts)[elts_free & elts_m]));
		++elts_free;
	}
#endif
	txq->cq_ci = cq_ci;
	txq->elts_tail = elts_tail;
	/* Update the consumer index. */
	rte_compiler_barrier();
	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
}

/**
 * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
 * as mempool is pre-configured and static.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	uint32_t lkey;

	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
				    MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half (binary search) on miss. */
	return mlx5_rx_addr2mr_bh(rxq, addr);
}

#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))

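/*
 * Editor's illustrative sketch (not in the original header): one way a
 * caller could use mlx5_rx_mb2mr() when filling an Rx WQE data segment for
 * a fresh mbuf.  The mlx5_wqe_data_seg layout and the assumption that the
 * cached LKey is already in the byte order expected by the device come from
 * the PRM definitions used elsewhere in this PMD; treat this as a sketch,
 * not the driver's actual refill routine.
 */
static __rte_always_inline void
mlx5_rx_example_fill_dseg(struct mlx5_rxq_data *rxq,
			  volatile struct mlx5_wqe_data_seg *dseg,
			  struct rte_mbuf *mb)
{
	dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(mb, uintptr_t));
	dseg->byte_count = rte_cpu_to_be_32(rte_pktmbuf_data_len(mb));
	/* L0/L1 lookup is inline; a miss falls back to mlx5_rx_addr2mr_bh(). */
	dseg->lkey = mlx5_rx_mb2mr(rxq, mb);
}
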
/**
 * Query LKey from a packet buffer for Tx. If not found, add the mempool.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Address to search.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
{
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	uint32_t lkey;

	/* Check generation bit to see if there's any change on existing MRs. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
				    MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half (binary search) on miss. */
	return mlx5_tx_addr2mr_bh(txq, addr);
}

#define mlx5_tx_mb2mr(txq, mb) mlx5_tx_addr2mr(txq, (uintptr_t)((mb)->buf_addr))

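/*
 * Editor's illustrative sketch (not in the original header): resolving the
 * LKey of every mbuf in a burst before building WQEs.  The helper name and
 * the bail-out on UINT32_MAX (the buffer could not be matched to, or
 * registered as, an MR) are assumptions for illustration only.
 */
static __rte_always_inline int
mlx5_tx_example_resolve_lkeys(struct mlx5_txq_data *txq,
			      struct rte_mbuf **pkts, uint16_t pkts_n,
			      uint32_t *lkeys)
{
	uint16_t i;

	for (i = 0; i < pkts_n; ++i) {
		/* Cache hits stay in L0/L1; misses may register a new MR. */
		lkeys[i] = mlx5_tx_mb2mr(txq, pkts[i]);
		if (unlikely(lkeys[i] == UINT32_MAX))
			return -1;
	}
	return 0;
}
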
/**
 * Ring TX queue doorbell and flush the update if requested.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 * @param cond
 *   Request for write memory barrier after BlueFlame update.
 */
static __rte_always_inline void
mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
		       int cond)
{
	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
	volatile uint64_t *src = ((volatile uint64_t *)wqe);

	rte_cio_wmb();
	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
	/* Ensure ordering between DB record and BF copy. */
	rte_wmb();
	mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
	if (cond)
		rte_wmb();
}

/**
 * Ring TX queue doorbell and flush the update by write memory barrier.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param wqe
 *   Pointer to the last WQE posted in the NIC.
 */
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
	mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}

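/*
 * Editor's illustrative sketch (not in the original header): ringing the
 * doorbell once at the end of a burst.  'last_wqe' stands for the last WQE
 * written in this burst and 'flush' for a caller-defined condition; both
 * names, and the policy of skipping the trailing barrier when the caller
 * provides its own ordering, are assumptions for illustration only.
 */
static __rte_always_inline void
mlx5_tx_example_ring_doorbell(struct mlx5_txq_data *txq,
			      volatile struct mlx5_wqe *last_wqe, int flush)
{
	if (flush)
		/* Doorbell plus trailing write memory barrier. */
		mlx5_tx_dbrec(txq, last_wqe);
	else
		/* Doorbell only; no barrier after the BlueFlame copy. */
		mlx5_tx_dbrec_cond_wmb(txq, last_wqe, 0);
}
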
/**
 * Convert mbuf to Verbs SWP.
 *
 * @param txq
 *   Pointer to the Tx queue.
 * @param buf
 *   Pointer to the mbuf.
 * @param offsets
 *   Pointer to the SWP header offsets.
 * @param swp_types
 *   Pointer to the SWP header types.
 */
static __rte_always_inline void
txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
		uint8_t *offsets, uint8_t *swp_types)
{
	const uint64_t vlan = buf->ol_flags & PKT_TX_VLAN_PKT;
	const uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
	const uint64_t tso = buf->ol_flags & PKT_TX_TCP_SEG;
	const uint64_t csum_flags = buf->ol_flags & PKT_TX_L4_MASK;
	const uint64_t inner_ip =
		buf->ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6);
	const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
				       PKT_TX_OUTER_IPV6;
	uint16_t idx;
	uint16_t off;

	if (likely(!txq->swp_en || (tunnel != PKT_TX_TUNNEL_UDP &&
				    tunnel != PKT_TX_TUNNEL_IP)))
		return;
	/*
	 * The index should have:
	 * bit[0:1] = PKT_TX_L4_MASK
	 * bit[4] = PKT_TX_IPV6
	 * bit[8] = PKT_TX_OUTER_IPV6
	 * bit[9] = PKT_TX_OUTER_UDP
	 */
	idx = (buf->ol_flags & ol_flags_mask) >> 52;
	if (tunnel == PKT_TX_TUNNEL_UDP)
		idx |= 1 << 9;
	*swp_types = mlx5_swp_types_table[idx];
	/*
	 * Set offsets for SW parser. Since ConnectX-5, SW parser just
	 * complements HW parser. SW parser starts to engage only if HW parser
	 * can't reach a header. For older devices, HW parser will not kick in
	 * if any of the SWP offsets is set. Therefore, all of the L3 offsets
	 * should be set regardless of HW offload.
	 */
	off = buf->outer_l2_len + (vlan ? sizeof(struct vlan_hdr) : 0);
	offsets[1] = off >> 1; /* Outer L3 offset. */
	off += buf->outer_l3_len;
	if (tunnel == PKT_TX_TUNNEL_UDP)
		offsets[0] = off >> 1; /* Outer L4 offset. */
	if (inner_ip) {
		off += buf->l2_len;
		offsets[3] = off >> 1; /* Inner L3 offset. */
		if (csum_flags == PKT_TX_TCP_CKSUM || tso ||
		    csum_flags == PKT_TX_UDP_CKSUM) {
			off += buf->l3_len;
			offsets[2] = off >> 1; /* Inner L4 offset. */
		}
	}
}

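/*
 * Editor's worked example (hypothetical header sizes, SW parser enabled on
 * the queue): a packet sent over a plain UDP tunnel (PKT_TX_TUNNEL_UDP) with
 * PKT_TX_IPV4 and PKT_TX_TCP_CKSUM set, no VLAN, and outer_l2_len = 14,
 * outer_l3_len = 20, l2_len = 8 and l3_len = 20 filled in by the
 * application.  SWP offsets are expressed in 16-bit words, hence the '>> 1':
 *   offsets[1] = 14 >> 1                 = 7   (outer L3)
 *   offsets[0] = (14 + 20) >> 1          = 17  (outer L4)
 *   offsets[3] = (14 + 20 + 8) >> 1      = 21  (inner L3)
 *   offsets[2] = (14 + 20 + 8 + 20) >> 1 = 31  (inner L4)
 */
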
/**
 * Convert the Checksum offloads to Verbs.
 *
 * @param buf
 *   Pointer to the mbuf.
 *
 * @return
 *   Converted checksum flags.
 */
static __rte_always_inline uint8_t
txq_ol_cksum_to_cs(struct rte_mbuf *buf)
{
	uint32_t idx;
	uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
	const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
				       PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;

	/*
	 * The index should have:
	 * bit[0] = PKT_TX_TCP_SEG
	 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
	 * bit[4] = PKT_TX_IP_CKSUM
	 * bit[8] = PKT_TX_OUTER_IP_CKSUM
	 * bit[9] = tunnel
	 */
	idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
	return mlx5_cksum_table[idx];
}

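/*
 * Editor's worked example (hypothetical flag combination): a tunnelled packet
 * requesting PKT_TX_OUTER_IP_CKSUM, PKT_TX_IP_CKSUM and PKT_TX_TCP_CKSUM.
 * With the bit mapping documented above, TCP_CKSUM contributes bit 2,
 * IP_CKSUM bit 4, OUTER_IP_CKSUM bit 8 and the tunnel flag bit 9, so
 *   idx = (1 << 2) | (1 << 4) | (1 << 8) | (1 << 9) = 788
 * and the function returns mlx5_cksum_table[788].
 */
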
/**
 * Count the number of contiguous single segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 *
 * @return
 *   Number of contiguous single segment packets.
 */
static __rte_always_inline unsigned int
txq_count_contig_single_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	unsigned int pos;

	if (!pkts_n)
		return 0;
	/* Count the number of contiguous single segment packets. */
	for (pos = 0; pos < pkts_n; ++pos)
		if (NB_SEGS(pkts[pos]) > 1)
			break;
	return pos;
}

/**
 * Count the number of contiguous multi-segment packets.
 *
 * @param pkts
 *   Pointer to array of packets.
 * @param pkts_n
 *   Number of packets.
 *
 * @return
 *   Number of contiguous multi-segment packets.
 */
static __rte_always_inline unsigned int
txq_count_contig_multi_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	unsigned int pos;

	if (!pkts_n)
		return 0;
	/* Count the number of contiguous multi-segment packets. */
	for (pos = 0; pos < pkts_n; ++pos)
		if (NB_SEGS(pkts[pos]) == 1)
			break;
	return pos;
}

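/*
 * Editor's illustrative sketch (not in the original header): splitting a Tx
 * burst into a leading run of single-segment packets (suited to a fast,
 * e.g. vectorized, routine) and the following run of multi-segment packets
 * (needing a scatter-capable routine).  The helper name and out-parameters
 * are assumptions for illustration only.
 */
static __rte_always_inline void
txq_example_burst_split(struct rte_mbuf **pkts, uint16_t pkts_n,
			uint16_t *n_single, uint16_t *n_multi)
{
	/* Leading packets that can take the single-segment path. */
	*n_single = txq_count_contig_single_seg(pkts, pkts_n);
	/* Packets right after them that need the multi-segment path. */
	*n_multi = txq_count_contig_multi_seg(pkts + *n_single,
					      pkts_n - *n_single);
}
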
#endif /* RTE_PMD_MLX5_RXTX_H_ */