raw/ioat: add API to query remaining ring space
Add a new API to query remaining descriptor ring capacity. This API is useful, for example, when an application needs to enqueue a fragmented packet and wants to ensure that all segments of the packet will be enqueued together.

Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
commit 29cf9c1194 (parent 74464005a2)
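To illustrate the use case described in the commit message, here is a minimal sketch of gating a multi-segment (chained mbuf) copy on the reported capacity, so that either every segment of a packet is enqueued or none are. The helper name ioat_copy_all_segs(), the dst_base layout and the handle values are illustrative assumptions, not part of this patch; only rte_ioat_burst_capacity(), rte_ioat_enqueue_copy() and rte_ioat_perform_ops() come from the ioat API.

#include <rte_mbuf.h>
#include <rte_ioat_rawdev.h>

/* Sketch only: enqueue all segments of a chained mbuf, or none of them.
 * ioat_copy_all_segs() is a hypothetical helper, not part of this patch.
 */
static int
ioat_copy_all_segs(int dev_id, struct rte_mbuf *pkt, phys_addr_t dst_base)
{
    struct rte_mbuf *seg;
    unsigned int offset = 0;

    /* refuse the whole packet if the ring cannot take every segment */
    if (rte_ioat_burst_capacity(dev_id) < pkt->nb_segs)
        return 0;

    for (seg = pkt; seg != NULL; seg = seg->next) {
        /* each enqueue is now guaranteed to find a free descriptor;
         * handle values are illustrative only
         */
        if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(seg),
                dst_base + offset, rte_pktmbuf_data_len(seg),
                (uintptr_t)pkt, 0) != 1)
            return -1; /* unexpected, treat as error */
        offset += rte_pktmbuf_data_len(seg);
    }
    rte_ioat_perform_ops(dev_id); /* kick off the copies */
    return pkt->nb_segs;
}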
@@ -277,6 +277,70 @@ test_enqueue_fill(int dev_id)
     return 0;
 }

+static int
+test_burst_capacity(int dev_id)
+{
+#define BURST_SIZE 64
+    const unsigned int ring_space = rte_ioat_burst_capacity(dev_id);
+    struct rte_mbuf *src, *dst;
+    unsigned int length = 1024;
+    unsigned int i, j, iter;
+    unsigned int old_cap, cap;
+    uintptr_t completions[BURST_SIZE];
+
+    src = rte_pktmbuf_alloc(pool);
+    dst = rte_pktmbuf_alloc(pool);
+
+    old_cap = ring_space;
+    /* to test capacity, we enqueue elements and check capacity is reduced
+     * by one each time - rebaselining the expected value after each burst
+     * as the capacity is only for a burst. We enqueue multiple bursts to
+     * fill up half the ring, before emptying it again. We do this twice to
+     * ensure that we get to test scenarios where we get ring wrap-around
+     */
+    for (iter = 0; iter < 2; iter++) {
+        for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
+            cap = rte_ioat_burst_capacity(dev_id);
+            if (cap > old_cap) {
+                PRINT_ERR("Error, avail ring capacity has gone up, not down\n");
+                return -1;
+            }
+            old_cap = cap;
+
+            for (j = 0; j < BURST_SIZE; j++) {
+                if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(src),
+                        rte_pktmbuf_iova(dst), length, 0, 0) != 1) {
+                    PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
+                    return -1;
+                }
+                if (cap - rte_ioat_burst_capacity(dev_id) != j + 1) {
+                    PRINT_ERR("Error, ring capacity did not change as expected\n");
+                    return -1;
+                }
+            }
+            rte_ioat_perform_ops(dev_id);
+        }
+        usleep(100);
+        for (i = 0; i < ring_space / (2 * BURST_SIZE); i++) {
+            if (rte_ioat_completed_ops(dev_id, BURST_SIZE,
+                    completions, completions) != BURST_SIZE) {
+                PRINT_ERR("Error with completions\n");
+                return -1;
+            }
+        }
+        if (rte_ioat_burst_capacity(dev_id) != ring_space) {
+            PRINT_ERR("Error, ring capacity has not reset to original value\n");
+            return -1;
+        }
+        old_cap = ring_space;
+    }
+
+    rte_pktmbuf_free(src);
+    rte_pktmbuf_free(dst);
+
+    return 0;
+}
+
 int
 ioat_rawdev_test(uint16_t dev_id)
 {
@@ -321,7 +385,7 @@ ioat_rawdev_test(uint16_t dev_id)
     }

     pool = rte_pktmbuf_pool_create("TEST_IOAT_POOL",
-            256, /* n == num elements */
+            p.ring_size * 2, /* n == num elements */
             32, /* cache size */
             0, /* priv size */
             2048, /* data room size */
@@ -385,6 +449,10 @@ ioat_rawdev_test(uint16_t dev_id)
     }
     printf("\n");

+    printf("Running Burst Capacity Test\n");
+    if (test_burst_capacity(dev_id) != 0)
+        goto err;
+
     rte_rawdev_stop(dev_id);
     if (rte_rawdev_xstats_reset(dev_id, NULL, 0) != 0) {
         PRINT_ERR("Error resetting xstat values\n");
@@ -117,6 +117,28 @@ struct rte_idxd_rawdev {
     struct rte_idxd_user_hdl *hdl_ring;
 };

+static __rte_always_inline uint16_t
+__idxd_burst_capacity(int dev_id)
+{
+    struct rte_idxd_rawdev *idxd =
+            (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
+    uint16_t write_idx = idxd->batch_start + idxd->batch_size;
+    uint16_t used_space;
+
+    /* Check for space in the batch ring */
+    if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
+            idxd->batch_idx_write + 1 == idxd->batch_idx_read)
+        return 0;
+
+    /* for descriptors, check for wrap-around on write but not read */
+    if (idxd->hdls_read > write_idx)
+        write_idx += idxd->desc_ring_mask + 1;
+    used_space = write_idx - idxd->hdls_read;
+
+    /* Return amount of free space in the descriptor ring */
+    return idxd->desc_ring_mask - used_space;
+}
+
 static __rte_always_inline rte_iova_t
 __desc_idx_to_iova(struct rte_idxd_rawdev *idxd, uint16_t n)
 {
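A note on the arithmetic in __idxd_burst_capacity(): the descriptor ring is a power of two addressed through desc_ring_mask, so when the read index is ahead of the un-wrapped write index, the write index is pushed up by one full ring length before subtracting, and reporting mask - used (rather than ring_size - used) keeps one slot in reserve in the usual way of distinguishing a full ring from an empty one. The standalone toy below models just that calculation with plain variables; the function name, values and main() driver are illustrative, not driver code.

/* Toy model of the mask-based free-space calculation used above.
 * Plain variables stand in for the rte_idxd_rawdev fields.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t
ring_free_space(uint16_t ring_mask, uint16_t read_idx, uint16_t write_idx)
{
    uint16_t used;

    /* if the writer has wrapped past the end but the reader has not,
     * shift the write index up by one full ring so the subtraction
     * below yields the true occupancy
     */
    if (read_idx > write_idx)
        write_idx += ring_mask + 1;
    used = write_idx - read_idx;

    /* one slot is held back so full and empty stay distinguishable */
    return ring_mask - used;
}

int main(void)
{
    /* ring of 64 descriptors: mask = 63, reader at 60, writer wrapped to 5 */
    printf("free = %u\n", ring_free_space(63, 60, 5));  /* 63 - 9 = 54 */
    printf("free = %u\n", ring_free_space(63, 10, 10)); /* empty ring: 63 */
    return 0;
}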
@@ -111,6 +111,19 @@ struct rte_ioat_rawdev {
 #define RTE_IOAT_CHANSTS_HALTED 0x3
 #define RTE_IOAT_CHANSTS_ARMED 0x4

+static __rte_always_inline uint16_t
+__ioat_burst_capacity(int dev_id)
+{
+    struct rte_ioat_rawdev *ioat =
+        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
+    unsigned short size = ioat->ring_size - 1;
+    unsigned short read = ioat->next_read;
+    unsigned short write = ioat->next_write;
+    unsigned short space = size - (write - read);
+
+    return space;
+}
+
 static __rte_always_inline int
 __ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst,
         unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
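__ioat_burst_capacity() uses the classic free-space formula for a ring with free-running indices: space = (ring_size - 1) - (next_write - next_read). Assuming next_read and next_write are unmasked 16-bit counters, which the unsigned-short subtraction in the patch suggests, the difference stays equal to the number of in-flight descriptors even after the counters wrap past 65535. A small self-contained illustration with made-up values:

/* Toy illustration of the free-running-index arithmetic above: with
 * 16-bit indices that are never masked, (write - read) is still the
 * element count after the counters wrap. Values are made up.
 */
#include <stdio.h>

int main(void)
{
    unsigned short ring_size = 4096;
    unsigned short read  = 65530;  /* about to wrap */
    unsigned short write = 10;     /* already wrapped */
    unsigned short used  = write - read;          /* 16 in flight */
    unsigned short space = (ring_size - 1) - used;

    printf("used=%u space=%u\n", used, space);    /* used=16 space=4079 */
    return 0;
}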
@@ -271,6 +284,17 @@ end:
     return count;
 }

+static inline uint16_t
+rte_ioat_burst_capacity(int dev_id)
+{
+    enum rte_ioat_dev_type *type =
+            (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+    if (*type == RTE_IDXD_DEV)
+        return __idxd_burst_capacity(dev_id);
+    else
+        return __ioat_burst_capacity(dev_id);
+}
+
 static inline int
 rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
         unsigned int len, uintptr_t dst_hdl)
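The public rte_ioat_burst_capacity() wrapper dispatches on a type tag: the cast above only works because the device's private data begins with an enum rte_ioat_dev_type, so the pointer can be inspected through an enum pointer before choosing the IOAT or IDXD implementation. A minimal standalone model of that pattern follows; the names and capacity values are illustrative, not DPDK definitions.

/* Minimal model of the type-tag dispatch used above: two unrelated
 * private structures share the same first member, so a pointer to
 * either can be inspected through a pointer to that enum before
 * picking the device-specific implementation.
 */
#include <stdio.h>
#include <stdint.h>

enum dev_type { DEV_A, DEV_B };

struct dev_a_priv { enum dev_type type; uint16_t ring_size; };
struct dev_b_priv { enum dev_type type; uint16_t max_batches; };

static uint16_t
burst_capacity(const void *dev_private)
{
    /* safe because both structs begin with enum dev_type */
    const enum dev_type *type = dev_private;

    if (*type == DEV_A)
        return ((const struct dev_a_priv *)dev_private)->ring_size - 1;
    return ((const struct dev_b_priv *)dev_private)->max_batches;
}

int main(void)
{
    struct dev_a_priv a = { .type = DEV_A, .ring_size = 4096 };
    struct dev_b_priv b = { .type = DEV_B, .max_batches = 32 };

    printf("a: %u, b: %u\n", burst_capacity(&a), burst_capacity(&b));
    return 0;
}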