numam-dpdk/lib/ethdev/ethdev_private.h
Jerin Jacob 6b81dddbb9 ethdev: support congestion management
NIC HW controllers often come with congestion management support for
various HW objects, keyed on quantities such as Rx queue depth or mempool queue depth.

They can also support various modes of operation, such as RED
(Random Early Discard) and WRED, on those HW objects.

Add a framework to express such modes (enum rte_cman_mode) and
introduce enum rte_eth_cman_obj to enumerate the different
objects the modes can operate on.

Add RTE_CMAN_RED mode of operation and the RTE_ETH_CMAN_OBJ_RX_QUEUE
and RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL objects.

Introduce reserved fields in the configuration structure,
initialized by rte_eth_cman_config_init(), so that new configuration
parameters can be added without ABI breakage.

Add rte_eth_cman_info_get() API to get information such as
supported modes and objects.

Add rte_eth_cman_config_init() and rte_eth_cman_config_set() APIs
to configure congestion management on those objects with the associated mode.

Finally, add rte_eth_cman_config_get() API to retrieve the
applied configuration.
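
A rough usage sketch of the resulting flow (my reading of the new API;
struct field names such as obj_param.rx_queue and mode_param.red, and the
RED threshold units, should be verified against rte_ethdev.h and rte_cman.h):

#include <errno.h>
#include <rte_bitops.h>
#include <rte_ethdev.h>

static int
enable_red_on_rxq(uint16_t port_id, uint16_t rxq)
{
	struct rte_eth_cman_info info;
	struct rte_eth_cman_config cfg;
	int ret;

	/* Query which modes and objects the driver supports. */
	ret = rte_eth_cman_info_get(port_id, &info);
	if (ret != 0)
		return ret;
	if (!(info.modes_supported & RTE_BIT64(RTE_CMAN_RED)) ||
	    !(info.objs_supported & RTE_BIT64(RTE_ETH_CMAN_OBJ_RX_QUEUE)))
		return -ENOTSUP;

	/* Let the driver fill defaults, including the reserved fields. */
	ret = rte_eth_cman_config_init(port_id, &cfg);
	if (ret != 0)
		return ret;

	cfg.obj = RTE_ETH_CMAN_OBJ_RX_QUEUE;
	cfg.mode = RTE_CMAN_RED;
	cfg.obj_param.rx_queue = rxq;
	cfg.mode_param.red.min_th = 10;   /* RED thresholds; units per rte_cman.h */
	cfg.mode_param.red.max_th = 80;
	cfg.mode_param.red.maxp_inv = 10; /* max marking probability = 1/10 */

	ret = rte_eth_cman_config_set(port_id, &cfg);
	if (ret != 0)
		return ret;

	/* Read back what the driver actually applied. */
	return rte_eth_cman_config_get(port_id, &cfg);
}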

Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Sunil Kumar Kori <skori@marvell.com>
2022-10-07 11:50:28 +02:00

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018 Gaëtan Rivet
*/

#ifndef _ETH_PRIVATE_H_
#define _ETH_PRIVATE_H_

#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_os_shim.h>

#include "rte_ethdev.h"

struct eth_dev_shared {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
};

extern struct eth_dev_shared *eth_dev_shared_data;
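
/*
 * Note (added commentary): eth_dev_shared_data points at memory shared
 * between the primary and secondary processes, prepared by
 * eth_dev_shared_data_prepare() below; the ownership spinlock serializes
 * port-owner updates across those processes.
 */
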
/**
* The user application callback description.
*
* It contains callback address to be registered by user application,
* the pointer to the parameters for callback, and the event type.
*/
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};
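
/*
 * Usage sketch (illustrative only, not part of this header): applications
 * create these entries indirectly through the public API, e.g.:
 *
 *	static int
 *	lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
 *	       void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_cb, NULL);
 */
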
extern rte_spinlock_t eth_dev_cb_lock;

/* Convert all errors to -EIO if the device is removed. */
int eth_err(uint16_t port_id, int ret);

/*
 * Convert rte_eth_dev pointer to port ID.
 * NULL will be translated to RTE_MAX_ETHPORTS.
 */
uint16_t eth_dev_to_id(const struct rte_eth_dev *dev);

/* Generic rte_eth_dev comparison function. */
typedef int (*rte_eth_cmp_t)(const struct rte_eth_dev *, const void *);

/* Generic rte_eth_dev iterator. */
struct rte_eth_dev *
eth_find_device(const struct rte_eth_dev *_start, rte_eth_cmp_t cmp,
		const void *data);
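
/*
 * Illustrative use with a hypothetical comparator (not part of this
 * header): eth_find_device() returns the first device for which
 * cmp(dev, data) == 0, scanning from the device after _start (or from
 * the beginning when _start is NULL):
 *
 *	static int
 *	eth_dev_name_cmp(const struct rte_eth_dev *dev, const void *name)
 *	{
 *		return strcmp(dev->data->name, (const char *)name);
 *	}
 *
 *	dev = eth_find_device(NULL, eth_dev_name_cmp, "net_null0");
 */
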
/* Parse devargs value for representor parameter. */
int rte_eth_devargs_parse_representor_ports(char *str, void *data);

/* reset eth fast-path API to dummy values */
void eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo);

/* setup eth fast-path API to ethdev values */
void eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
		const struct rte_eth_dev *dev);
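
/*
 * Note (added commentary): rte_eth_fp_ops is the per-port table of
 * fast-path entry points consumed by rte_eth_rx_burst()/rte_eth_tx_burst();
 * reset installs dummy callbacks for unused ports, while setup points the
 * table at the driver's datapath functions.
 */
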
void eth_dev_shared_data_prepare(void);

void eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid);
void eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid);

int eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues);
int eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues);

#endif /* _ETH_PRIVATE_H_ */