lib: fix various compilation warnings

Signed-off-by: Intel

parent 3b46fb77eb
commit a974564b34
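Most of the changes below follow a handful of recurring patterns: arithmetic on uint8_t/uint16_t fields is re-cast explicitly, because C's integer promotions turn `a + b` into an `int` expression and the assignment back to the narrow type draws an implicit-conversion warning from icc (and gcc's conversion checks); parameters forced by a callback signature but never read are tagged `__rte_unused`; locals written only through an out-parameter get a NULL initializer; and printf-style arguments are cast to the exact type their format specifier names. A minimal standalone sketch of the cast pattern (illustrative code, not from this commit):

    #include <stdint.h>

    /* The uint16_t operands are promoted to int before the '+', so the
     * assignment narrows int -> uint16_t; spelling out the cast
     * documents the truncation and silences the warning. */
    static uint16_t
    ring_advance(uint16_t tail, uint16_t n)
    {
        return (uint16_t)(tail + n);
    }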
@@ -231,7 +231,7 @@ split_buffer_search(struct rte_pm_ctx * pmx, struct rte_pm_build_opt * bopt)

     int len, seg_len, total_len;
     int i, j, n_seg;
-    int cur_match, num_matches, total_matches;
+    int cur_match, num_matches, total_matches = 0;

     /* chain matching */
     for (i = 0; i < pm_test_buf_len; i++) {
@@ -334,7 +334,7 @@ single_buffer_search(struct rte_pm_ctx * pmx, struct rte_pm_build_opt * bopt)
     struct rte_pm_inbuf in_buf;

     int i, j, len;
-    int match, num_matches, total_matches;
+    int match, num_matches, total_matches = 0;

     /* look at same segment three times */
     for (i = 0; i < pm_test_buf_len; i++) {
@@ -123,7 +123,6 @@ static uint32_t l2fwd_dst_ports[L2FWD_MAX_PORTS];

 static unsigned int l2fwd_rx_queue_per_lcore = 1;

-#define MAX_PKT_BURST 32
 struct mbuf_table {
     unsigned len;
     struct rte_mbuf *m_table[MAX_PKT_BURST];
@@ -586,8 +585,8 @@ MAIN(int argc, char **argv)
     struct lcore_queue_conf *qconf;
     struct rte_eth_dev_info dev_info;
     int ret;
-    unsigned int nb_ports;
-    unsigned portid, last_port;
+    uint8_t nb_ports;
+    uint8_t portid, last_port;
     unsigned lcore_id, rx_lcore_id;
     unsigned nb_ports_in_mask = 0;

@@ -650,7 +649,7 @@ MAIN(int argc, char **argv)

         nb_ports_in_mask++;

-        rte_eth_dev_info_get((uint8_t) portid, &dev_info);
+        rte_eth_dev_info_get(portid, &dev_info);
     }
     if (nb_ports_in_mask < 2 || nb_ports_in_mask % 2) {
         printf("Notice: odd number of ports in portmask.\n");
@@ -681,55 +680,55 @@ MAIN(int argc, char **argv)

         qconf->rx_port_list[qconf->n_rx_port] = portid;
         qconf->n_rx_port++;
-        printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
+        printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);
     }

     /* Initialise each port */
     for (portid = 0; portid < nb_ports; portid++) {
         /* skip ports that are not enabled */
         if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
-            printf("Skipping disabled port %u\n", portid);
+            printf("Skipping disabled port %u\n", (unsigned) portid);
             continue;
         }
         /* init port */
-        printf("Initializing port %u... ", portid);
+        printf("Initializing port %u... ", (unsigned) portid);
         fflush(stdout);
-        ret = rte_eth_dev_configure((uint8_t) portid, 1, 1, &port_conf);
+        ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
         if (ret < 0)
             rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
-                  ret, portid);
+                  ret, (unsigned) portid);

-        rte_eth_macaddr_get((uint8_t) portid,&l2fwd_ports_eth_addr[portid]);
+        rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);

         /* init one RX queue */
         fflush(stdout);
-        ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd,
-                         SOCKET0, &rx_conf,
+        ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
+                         rte_eth_dev_socket_id(portid), &rx_conf,
                          l2fwd_pktmbuf_pool);
         if (ret < 0)
-            rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
-                  ret, portid);
+            rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
+                  ret, (unsigned) portid);

         /* init one TX queue logical core on each port */
         fflush(stdout);
-        ret = rte_eth_tx_queue_setup((uint8_t) portid, 0, nb_txd,
+        ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
                          SOCKET0, &tx_conf);
         if (ret < 0)
             rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
-                  ret, portid);
+                  ret, (unsigned) portid);

         /* Start device */
-        ret = rte_eth_dev_start((uint8_t) portid);
+        ret = rte_eth_dev_start(portid);
         if (ret < 0)
             rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
-                  ret, portid);
+                  ret, (unsigned) portid);

         printf("done: \n");

-        rte_eth_promiscuous_enable((uint8_t)portid);
+        rte_eth_promiscuous_enable(portid);

         printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
-                portid,
+                (unsigned) portid,
                 l2fwd_ports_eth_addr[portid].addr_bytes[0],
                 l2fwd_ports_eth_addr[portid].addr_bytes[1],
                 l2fwd_ports_eth_addr[portid].addr_bytes[2],
@@ -741,7 +740,7 @@ MAIN(int argc, char **argv)
         memset(&port_statistics, 0, sizeof(port_statistics));
     }

-    check_all_ports_link_status((uint8_t)nb_ports, l2fwd_enabled_port_mask);
+    check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);

     /* launch per-lcore init on every lcore */
     rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER);
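In both example apps the port-id variables become `uint8_t`, the type the ethdev API uses, which removes the `(uint8_t)` casts at every rte_eth_* call; the casts move instead into the `printf()` calls, where `%u` expects `unsigned int`. Default argument promotion already widens a `uint8_t` vararg to `int`, so the `(unsigned)` cast changes nothing at runtime; it exists to satisfy format-string checking. A standalone sketch (illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t portid = 3;

        /* portid is promoted in the vararg list anyway; the cast
         * makes the promoted type agree with %u for -Wformat. */
        printf("port %u\n", (unsigned) portid);
        return 0;
    }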
@@ -639,8 +639,8 @@ MAIN(int argc, char **argv)
     struct lcore_queue_conf *qconf;
     struct rte_eth_dev_info dev_info;
     int ret;
-    unsigned int nb_ports;
-    unsigned portid, portid_last = 0;
+    uint8_t nb_ports;
+    uint8_t portid, portid_last = 0;
     unsigned lcore_id, rx_lcore_id;
     unsigned nb_ports_in_mask = 0;

@@ -699,7 +699,7 @@ MAIN(int argc, char **argv)

         nb_ports_in_mask++;

-        rte_eth_dev_info_get((uint8_t) portid, &dev_info);
+        rte_eth_dev_info_get(portid, &dev_info);
     }
     if (nb_ports_in_mask < 2 || nb_ports_in_mask % 2)
         rte_exit(EXIT_FAILURE, "Current enabled port number is %u, "
@@ -730,61 +730,61 @@ MAIN(int argc, char **argv)

         qconf->rx_port_list[qconf->n_rx_port] = portid;
         qconf->n_rx_port++;
-        printf("Lcore %u: RX port %u\n",rx_lcore_id, portid);
+        printf("Lcore %u: RX port %u\n",rx_lcore_id, (unsigned) portid);
     }

     /* Initialise each port */
     for (portid = 0; portid < nb_ports; portid++) {
         /* skip ports that are not enabled */
         if ((lsi_enabled_port_mask & (1 << portid)) == 0) {
-            printf("Skipping disabled port %u\n", portid);
+            printf("Skipping disabled port %u\n", (unsigned) portid);
             continue;
         }
         /* init port */
-        printf("Initializing port %u... ", portid);
+        printf("Initializing port %u... ", (unsigned) portid);
         fflush(stdout);
-        ret = rte_eth_dev_configure((uint8_t) portid, 1, 1, &port_conf);
+        ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
         if (ret < 0)
             rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
-                  ret, portid);
+                  ret, (unsigned) portid);

         /* register lsi interrupt callback, need to be after
          * rte_eth_dev_configure(). if (intr_conf.lsc == 0), no
          * lsc interrupt will be present, and below callback to
          * be registered will never be called.
          */
-        rte_eth_dev_callback_register((uint8_t)portid,
+        rte_eth_dev_callback_register(portid,
             RTE_ETH_EVENT_INTR_LSC, lsi_event_callback, NULL);

-        rte_eth_macaddr_get((uint8_t) portid,
+        rte_eth_macaddr_get(portid,
                     &lsi_ports_eth_addr[portid]);

         /* init one RX queue */
         fflush(stdout);
-        ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd,
+        ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
                          SOCKET0, &rx_conf,
                          lsi_pktmbuf_pool);
         if (ret < 0)
-            rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%u\n",
-                  ret, portid);
+            rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%u\n",
+                  ret, (unsigned) portid);

         /* init one TX queue logical core on each port */
         fflush(stdout);
-        ret = rte_eth_tx_queue_setup((uint8_t) portid, 0, nb_txd,
+        ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
                          SOCKET0, &tx_conf);
         if (ret < 0)
             rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d,port=%u\n",
-                  ret, portid);
+                  ret, (unsigned) portid);

         /* Start device */
-        ret = rte_eth_dev_start((uint8_t) portid);
+        ret = rte_eth_dev_start(portid);
         if (ret < 0)
             rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%u\n",
-                  ret, portid);
+                  ret, (unsigned) portid);
         printf("done:\n");

         printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
-                portid,
+                (unsigned) portid,
                 lsi_ports_eth_addr[portid].addr_bytes[0],
                 lsi_ports_eth_addr[portid].addr_bytes[1],
                 lsi_ports_eth_addr[portid].addr_bytes[2],
@@ -796,7 +796,7 @@ MAIN(int argc, char **argv)
         memset(&port_statistics, 0, sizeof(port_statistics));
     }

-    check_all_ports_link_status((uint8_t)nb_ports, lsi_enabled_port_mask);
+    check_all_ports_link_status(nb_ports, lsi_enabled_port_mask);

     /* launch per-lcore init on every lcore */
     rte_eal_mp_remote_launch(lsi_launch_one_lcore, NULL, CALL_MASTER);
@@ -75,7 +75,7 @@ static void cmd_send_parsed(void *parsed_result,
                 __attribute__((unused)) struct cmdline *cl,
                 __attribute__((unused)) void *data)
 {
-    void *msg;
+    void *msg = NULL;
     struct cmd_send_result *res = parsed_result;

     if (rte_mempool_get(message_pool, &msg) < 0)
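`msg` is written only through `&msg` inside `rte_mempool_get()`, which the compiler cannot always see through, so it may report a possibly-uninitialized use on the error path; the NULL initializer quiets that without changing behaviour. A standalone illustration (the helper is a stand-in, not DPDK API):

    #include <stddef.h>

    /* Stand-in for an out-parameter allocator like rte_mempool_get(). */
    static int get_obj(void **obj)
    {
        *obj = NULL;
        return -1; /* pretend the pool is empty */
    }

    int main(void)
    {
        void *msg = NULL; /* initializer quiets -Wmaybe-uninitialized */

        if (get_obj(&msg) < 0)
            return 1; /* msg is never read on this path */
        return 0;
    }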
@@ -122,7 +122,7 @@ rte_align_floor_int(uintptr_t ptr, uintptr_t align)
  * power-of-two value.
  */
 #define RTE_ALIGN_FLOOR(val, align) \
-    (typeof(val))(val & (~((typeof(val))(align - 1))))
+    (typeof(val))((val) & (~((typeof(val))((align) - 1))))

 /**
  * Macro to align a pointer to a given power-of-two. The resultant
@@ -131,7 +131,7 @@ rte_align_floor_int(uintptr_t ptr, uintptr_t align)
  * must be a power-of-two value.
  */
 #define RTE_PTR_ALIGN_CEIL(ptr, align) \
-    RTE_PTR_ALIGN_FLOOR(RTE_PTR_ADD(ptr, align - 1), align)
+    RTE_PTR_ALIGN_FLOOR(RTE_PTR_ADD(ptr, (align) - 1), align)

 /**
  * Macro to align a value to a given power-of-two. The resultant value
@@ -140,7 +140,7 @@ rte_align_floor_int(uintptr_t ptr, uintptr_t align)
  * value.
  */
 #define RTE_ALIGN_CEIL(val, align) \
-    RTE_ALIGN_FLOOR((val + ((typeof(val)) align - 1)), align)
+    RTE_ALIGN_FLOOR(((val) + ((typeof(val)) (align) - 1)), align)

 /**
  * Macro to align a pointer to a given power-of-two. The resultant
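The extra parentheses in the three alignment macros matter as soon as a caller passes an expression instead of a plain identifier: the argument's operators can otherwise rebind against the macro body's operators. A standalone sketch of the failure mode, using a simplified macro rather than the real header:

    #include <stdio.h>

    #define ALIGN_FLOOR_BAD(val, align)  ((val) & ~(align - 1))
    #define ALIGN_FLOOR_OK(val, align)   ((val) & ~((align) - 1))

    int main(void)
    {
        unsigned v = 100;

        /* Passing "1 << 4" as align: the bad macro expands to
         * ~(1 << 4 - 1) == ~(1 << 3), because '-' binds tighter
         * than '<<'. */
        printf("bad: %u\n", ALIGN_FLOOR_BAD(v, 1 << 4)); /* prints 100 */
        printf("ok:  %u\n", ALIGN_FLOOR_OK(v, 1 << 4));  /* prints 96 */
        return 0;
    }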
@@ -615,7 +615,7 @@ parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,

     /* now convert to int values */
     errno = 0;
-    *domain = (uint8_t)strtoul(splitaddr.domain, NULL, 16);
+    *domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
     *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
     *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
     *function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
@@ -263,19 +263,19 @@ kni_ioctl_create(unsigned int ioctl_num, unsigned long ioctl_param)
     kni->mbuf_size = dev_info.mbuf_size;

     KNI_PRINT("tx_phys: 0x%016llx, tx_q addr: 0x%p\n",
-        dev_info.tx_phys, kni->tx_q);
+        (unsigned long long) dev_info.tx_phys, kni->tx_q);
     KNI_PRINT("rx_phys: 0x%016llx, rx_q addr: 0x%p\n",
-        dev_info.rx_phys, kni->rx_q);
+        (unsigned long long) dev_info.rx_phys, kni->rx_q);
     KNI_PRINT("alloc_phys: 0x%016llx, alloc_q addr: 0x%p\n",
-        dev_info.alloc_phys, kni->alloc_q);
+        (unsigned long long) dev_info.alloc_phys, kni->alloc_q);
     KNI_PRINT("free_phys: 0x%016llx, free_q addr: 0x%p\n",
-        dev_info.free_phys, kni->free_q);
+        (unsigned long long) dev_info.free_phys, kni->free_q);
     KNI_PRINT("req_phys: 0x%016llx, req_q addr: 0x%p\n",
-        dev_info.req_phys, kni->req_q);
+        (unsigned long long) dev_info.req_phys, kni->req_q);
     KNI_PRINT("resp_phys: 0x%016llx, resp_q addr: 0x%p\n",
-        dev_info.resp_phys, kni->resp_q);
+        (unsigned long long) dev_info.resp_phys, kni->resp_q);
     KNI_PRINT("mbuf_phys: 0x%016llx, mbuf_kva: 0x%p\n",
-        dev_info.mbuf_phys, kni->mbuf_kva);
+        (unsigned long long) dev_info.mbuf_phys, kni->mbuf_kva);
     KNI_PRINT("mbuf_va: 0x%p\n", dev_info.mbuf_va);
     KNI_PRINT("mbuf_size: %u\n", kni->mbuf_size);

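The KNI module logs physical addresses with `%llx`, which requires an `unsigned long long` argument; a 64-bit field such as `tx_phys` may be typedef'd as plain `unsigned long` on 64-bit builds, so the format checker warns even though the widths match. Casting at the call site keeps one format string portable across both definitions. A standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t phys = 0x1234abcdULL;

        /* uint64_t may be unsigned long or unsigned long long
         * depending on the ABI; the cast pins the type %llx expects. */
        printf("phys: 0x%016llx\n", (unsigned long long) phys);
        return 0;
    }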
@@ -40,6 +40,25 @@ LIB = librte_pmd_e1000.a
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)

+ifeq ($(CC), icc)
+#
+# CFLAGS for icc
+#
+CFLAGS_LAD = -wd177 -wd181 -wd188 -wd869 -wd2259
+else
+#
+# CFLAGS for gcc
+#
+CFLAGS_LAD = -Wno-uninitialized -Wno-unused-parameter
+CFLAGS_LAD += -Wno-unused-variable
+endif
+
+#
+# Add extra flags for LAD source files to disable warnings in them
+#
+LAD_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(RTE_SDK)/lib/librte_pmd_e1000/e1000/*.c)))
+$(foreach obj, $(LAD_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_LAD)))
+
 VPATH += $(RTE_SDK)/lib/librte_pmd_e1000/e1000

 #
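This Makefile block is the replacement for the in-source pragmas removed in the next hunk: every object built from the shared e1000/ base-driver sources (collected in LAD_OBJS) gets the warning-suppression flags appended through the per-object CFLAGS_<obj> hook that the DPDK build system evaluates, so the imported vendor code still builds under $(WERROR_FLAGS) while DPDK's own files keep full warnings.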
@@ -46,24 +46,6 @@

 #include "../e1000_logs.h"

-/* Remove some compiler warnings for the files in this dir */
-#ifdef __INTEL_COMPILER
-#pragma warning(disable:2259) /* conversion may lose significant bits */
-#pragma warning(disable:869) /* Parameter was never referenced */
-#pragma warning(disable:181) /* Arg incompatible with format string */
-#pragma warning(disable:188) /* enumerated type mixed with another type */
-#pragma warning(disable:1599) /* declaration hides variable */
-#pragma warning(disable:177) /* declared but never referenced */
-#else
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#pragma GCC diagnostic ignored "-Wformat"
-#pragma GCC diagnostic ignored "-Wuninitialized"
-#pragma GCC diagnostic ignored "-Wunused-variable"
-#if (((__GNUC__) >= 4) && ((__GNUC_MINOR__) >= 7))
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
-#endif
-
 #define DELAY(x) rte_delay_us(x)
 #define usec_delay(x) DELAY(x)
 #define msec_delay(x) DELAY(1000*(x))
@@ -140,7 +122,7 @@ static inline uint32_t e1000_read_addr(volatile void* addr)

 #define E1000_ACCESS_PANIC(x, hw, reg, value) \
     rte_panic("%s:%u\t" RTE_STR(x) "(%p, 0x%x, 0x%x)", \
-        __FILE__, __LINE__, (hw), (reg), (value))
+        __FILE__, __LINE__, (hw), (reg), (unsigned int)(value))

 /*
  * To be able to do IO write, we need to map IO BAR
@@ -1321,7 +1321,8 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
  * void
  */
 static void
-eth_em_interrupt_handler(struct rte_intr_handle *handle, void *param)
+eth_em_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+             void *param)
 {
     struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

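`__rte_unused` expands to GCC's `__attribute__((unused))` and marks parameters that exist only because the callback signature requires them, keeping `-Wunused-parameter` quiet at exactly one site instead of disabling it file-wide. A standalone sketch of the same pattern in plain GCC C (the callback type here is made up for illustration):

    #define my_unused __attribute__((unused))

    typedef void (*intr_cb_t)(void *handle, void *param);

    /* The registration API fixes the signature; this handler only
     * uses param, so handle is tagged to suppress the warning. */
    static void
    my_handler(my_unused void *handle, void *param)
    {
        (void)param; /* real work would go here */
    }

    int main(void)
    {
        intr_cb_t cb = my_handler;
        cb((void *)0, (void *)0);
        return 0;
    }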
@@ -229,19 +229,20 @@ em_set_xmit_ctx(struct em_tx_queue* txq,
     cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C;

     l2len = hdrlen.f.l2_len;
-    ipcse = l2len + hdrlen.f.l3_len;
+    ipcse = (uint16_t)(l2len + hdrlen.f.l3_len);

     /* setup IPCS* fields */
-    ctx.lower_setup.ip_fields.ipcss = l2len;
-    ctx.lower_setup.ip_fields.ipcso =l2len +
-            offsetof(struct ipv4_hdr, hdr_checksum);
+    ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len;
+    ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len +
+            offsetof(struct ipv4_hdr, hdr_checksum));

     /*
      * When doing checksum or TCP segmentation with IPv6 headers,
      * IPCSE field should be set t0 0.
      */
     if (flags & PKT_TX_IP_CKSUM) {
-        ctx.lower_setup.ip_fields.ipcse = rte_cpu_to_le_16(ipcse - 1);
+        ctx.lower_setup.ip_fields.ipcse =
+            (uint16_t)rte_cpu_to_le_16(ipcse - 1);
         cmd_len |= E1000_TXD_CMD_IP;
         cmp_mask |= TX_MACIP_LEN_CMP_MASK;
     } else {
@@ -249,18 +250,18 @@ em_set_xmit_ctx(struct em_tx_queue* txq,
     }

     /* setup TUCS* fields */
-    ctx.upper_setup.tcp_fields.tucss = ipcse;
+    ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse;
     ctx.upper_setup.tcp_fields.tucse = 0;

     switch (flags & PKT_TX_L4_MASK) {
     case PKT_TX_UDP_CKSUM:
-        ctx.upper_setup.tcp_fields.tucso = ipcse +
-                offsetof(struct udp_hdr, dgram_cksum);
+        ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
+                offsetof(struct udp_hdr, dgram_cksum));
         cmp_mask |= TX_MACIP_LEN_CMP_MASK;
         break;
     case PKT_TX_TCP_CKSUM:
-        ctx.upper_setup.tcp_fields.tucso = ipcse +
-                offsetof(struct tcp_hdr, cksum);
+        ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
+                offsetof(struct tcp_hdr, cksum));
         cmd_len |= E1000_TXD_CMD_TCP;
         cmp_mask |= TX_MACIP_LEN_CMP_MASK;
         break;
@@ -308,9 +309,9 @@ em_xmit_cleanup(struct em_tx_queue *txq)
     uint16_t nb_tx_to_clean;

     /* Determine the last descriptor needing to be cleaned */
-    desc_to_clean_to = last_desc_cleaned + txq->tx_rs_thresh;
+    desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
     if (desc_to_clean_to >= nb_tx_desc)
-        desc_to_clean_to = desc_to_clean_to - nb_tx_desc;
+        desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);

     /* Check to make sure the last descriptor to clean is done */
     desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
@@ -327,10 +328,11 @@ em_xmit_cleanup(struct em_tx_queue *txq)

     /* Figure out how many descriptors will be cleaned */
     if (last_desc_cleaned > desc_to_clean_to)
-        nb_tx_to_clean = ((nb_tx_desc - last_desc_cleaned) +
+        nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
                     desc_to_clean_to);
     else
-        nb_tx_to_clean = desc_to_clean_to - last_desc_cleaned;
+        nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+                    last_desc_cleaned);

     PMD_TX_FREE_LOG(DEBUG,
             "Cleaning %4u TX descriptors: %4u to %4u "
@@ -348,7 +350,7 @@ em_xmit_cleanup(struct em_tx_queue *txq)

     /* Update the txq to reflect the last descriptor that was cleaned */
     txq->last_desc_cleaned = desc_to_clean_to;
-    txq->nb_tx_free += nb_tx_to_clean;
+    txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);

     /* No Error */
     return (0);
@@ -416,7 +418,8 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         ol_flags = tx_pkt->ol_flags;

         /* If hardware offload required */
-        tx_ol_req = ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK);
+        tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
+                            PKT_TX_L4_MASK));
         if (tx_ol_req) {
             hdrlen = tx_pkt->pkt.vlan_macip;
             /* If new context to be built or reuse the exist ctx. */
@@ -431,7 +434,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
          * This will always be the number of segments + the number of
          * Context descriptors required to transmit the packet
          */
-        nb_used = tx_pkt->pkt.nb_segs + new_ctx;
+        nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);

         /*
          * The number of descriptors that must be allocated for a
@@ -580,8 +583,8 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
          * The last packet data descriptor needs End Of Packet (EOP)
          */
         cmd_type_len |= E1000_TXD_CMD_EOP;
-        txq->nb_tx_used += nb_used;
-        txq->nb_tx_free -= nb_used;
+        txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
+        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);

         /* Set RS bit only on threshold packets' last descriptor */
         if (txq->nb_tx_used >= txq->tx_rs_thresh) {
@@ -624,8 +627,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
     uint16_t pkt_flags;

     /* Check if VLAN present */
-    pkt_flags = (uint16_t) (rx_status & E1000_RXD_STAT_VP) ?
-                PKT_RX_VLAN_PKT : 0;
+    pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
+                PKT_RX_VLAN_PKT : 0);

     return pkt_flags;
 }
@@ -777,7 +780,8 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
         rxm->pkt.in_port = rxq->port_id;

         rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
-        rxm->ol_flags |= rx_desc_error_to_pkt_flags(rxd.errors);
+        rxm->ol_flags = (uint16_t)(rxm->ol_flags |
+                rx_desc_error_to_pkt_flags(rxd.errors));

         /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
         rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
@@ -1002,7 +1006,8 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
         first_seg->pkt.in_port = rxq->port_id;

         first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
-        first_seg->ol_flags |= rx_desc_error_to_pkt_flags(rxd.errors);
+        first_seg->ol_flags = (uint16_t)(first_seg->ol_flags |
+                rx_desc_error_to_pkt_flags(rxd.errors));

         /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
         rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
@@ -1192,28 +1197,27 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,

     tx_free_thresh = tx_conf->tx_free_thresh;
     if (tx_free_thresh == 0)
-        tx_free_thresh = RTE_MIN(nb_desc / 4, DEFAULT_TX_FREE_THRESH);
+        tx_free_thresh = (uint16_t)RTE_MIN(nb_desc / 4,
+                           DEFAULT_TX_FREE_THRESH);

     tx_rs_thresh = tx_conf->tx_rs_thresh;
     if (tx_rs_thresh == 0)
-        tx_rs_thresh = RTE_MIN(tx_free_thresh, DEFAULT_TX_RS_THRESH);
+        tx_rs_thresh = (uint16_t)RTE_MIN(tx_free_thresh,
+                         DEFAULT_TX_RS_THRESH);

     if (tx_free_thresh >= (nb_desc - 3)) {
-        RTE_LOG(ERR, PMD,
-            "tx_free_thresh must be less than the "
-            "number of TX descriptors minus 3. "
-            "(tx_free_thresh=%u port=%d queue=%d)\n",
-            tx_free_thresh, dev->data->port_id, queue_idx);
+        RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
+            "number of TX descriptors minus 3. (tx_free_thresh=%u "
+            "port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
+            (int)dev->data->port_id, (int)queue_idx);
         return -(EINVAL);
     }
     if (tx_rs_thresh > tx_free_thresh) {
-        RTE_LOG(ERR, PMD,
-            "tx_rs_thresh must be less than or equal to "
-            "tx_free_thresh. "
-            "(tx_free_thresh=%u tx_rs_thresh=%u "
-            "port=%d queue=%d)\n",
-            tx_free_thresh, tx_rs_thresh, dev->data->port_id,
-            queue_idx);
+        RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or equal to "
+            "tx_free_thresh. (tx_free_thresh=%u tx_rs_thresh=%u "
+            "port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
+            (unsigned int)tx_rs_thresh, (int)dev->data->port_id,
+            (int)queue_idx);
         return -(EINVAL);
     }

@@ -1224,11 +1228,10 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
      * accumulates WTHRESH descriptors.
      */
     if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
-        RTE_LOG(ERR, PMD,
-            "TX WTHRESH must be set to 0 if "
-            "tx_rs_thresh is greater than 1. "
-            "(tx_rs_thresh=%u port=%d queue=%d)\n",
-            tx_rs_thresh, dev->data->port_id, queue_idx);
+        RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
+            "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
+            "port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
+            (int)dev->data->port_id, (int)queue_idx);
         return -(EINVAL);
     }

@@ -1436,7 +1439,7 @@ em_dev_clear_queues(struct rte_eth_dev *dev)
  * Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register.
  */
 static uint32_t
-em_rctl_bsize(enum e1000_mac_type hwtyp, uint32_t *bufsz)
+em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz)
 {
     /*
      * For BSIZE & BSEX all configurable sizes are:
@@ -1550,7 +1550,8 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
  * void
  */
 static void
-eth_igb_interrupt_handler(struct rte_intr_handle *handle, void *param)
+eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+              void *param)
 {
     struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

@@ -1829,7 +1830,8 @@ static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
         mask = 1;
         for (j = 0; j < 32; j++){
             if(vfta & mask)
-                igbvf_set_vfta(hw, (i<<5)+j, on);
+                igbvf_set_vfta(hw,
+                    (uint16_t)((i<<5)+j), on);
             mask<<=1;
         }
     }
@@ -364,7 +364,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

         ol_flags = tx_pkt->ol_flags;
         vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
-        tx_ol_req = (ol_flags & PKT_TX_OFFLOAD_MASK);
+        tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);

         /* If a Context Descriptor need be built . */
         if (tx_ol_req) {
@@ -569,15 +569,15 @@ rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
         0, 0, 0, 0,
     };

-    pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
+    pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
                 ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
-                ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+                ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
 #else
-    pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
-                ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+    pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
+                ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
 #endif
-    return pkt_flags | (uint16_t) (((hl_tp_rs & 0x0F) == 0) ? 0 :
-                        PKT_RX_RSS_HASH);
+    return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
+                        0 : PKT_RX_RSS_HASH));
 }

 static inline uint16_t
@@ -586,11 +586,12 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
     uint16_t pkt_flags;

     /* Check if VLAN present */
-    pkt_flags = (uint16_t) (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
+    pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
+                PKT_RX_VLAN_PKT : 0);

 #if defined(RTE_LIBRTE_IEEE1588)
     if (rx_status & E1000_RXD_STAT_TMST)
-        pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+        pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
 #endif
     return pkt_flags;
 }
@@ -750,10 +751,10 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
             rte_le_to_cpu_16(rxd.wb.upper.vlan);

         pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-        pkt_flags = (pkt_flags |
+        pkt_flags = (uint16_t)(pkt_flags |
                 rx_desc_status_to_pkt_flags(staterr));
-        pkt_flags = (pkt_flags |
+        pkt_flags = (uint16_t)(pkt_flags |
                 rx_desc_error_to_pkt_flags(staterr));
         rxm->ol_flags = pkt_flags;

         /*
@@ -987,8 +988,10 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
             rte_le_to_cpu_16(rxd.wb.upper.vlan);
         hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
         pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-        pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
-        pkt_flags = (pkt_flags | rx_desc_error_to_pkt_flags(staterr));
+        pkt_flags = (uint16_t)(pkt_flags |
+                rx_desc_status_to_pkt_flags(staterr));
+        pkt_flags = (uint16_t)(pkt_flags |
+                rx_desc_error_to_pkt_flags(staterr));
         first_seg->ol_flags = pkt_flags;

         /* Prefetch data of first segment, if configured to do so. */
@@ -1137,7 +1140,7 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
     }

     /* Initialize ring entries */
-    prev = txq->nb_tx_desc - 1;
+    prev = (uint16_t)(txq->nb_tx_desc - 1);
     for (i = 0; i < txq->nb_tx_desc; i++) {
         volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);

@@ -37,9 +37,35 @@ include $(RTE_SDK)/mk/rte.vars.mk
 #
 LIB = librte_pmd_ixgbe.a

-CFLAGS += -O3 -Wno-deprecated
+CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)

+ifeq ($(CC), icc)
+#
+# CFLAGS for icc
+#
+CFLAGS_LAD = -wd174 -wd593 -wd869 -wd981 -wd2259
+else
+#
+# CFLAGS for gcc
+#
+ifneq ($(shell test $(GCC_MAJOR_VERSION) -le 4 -a $(GCC_MINOR_VERSION) -le 3 && echo 1), 1)
+CFLAGS += -Wno-deprecated
+endif
+CFLAGS_LAD = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_LAD += -Wno-strict-aliasing -Wno-format-extra-args
+
+ifeq ($(shell test $(GCC_MAJOR_VERSION) -ge 4 -a $(GCC_MINOR_VERSION) -ge 6 && echo 1), 1)
+CFLAGS_ixgbe_common.o += -Wno-unused-but-set-variable
+endif
+endif
+
+#
+# Add extra flags for LAD source files to disable warnings in them
+#
+LAD_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(RTE_SDK)/lib/librte_pmd_ixgbe/ixgbe/*.c)))
+$(foreach obj, $(LAD_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_LAD)))
+
 VPATH += $(RTE_SDK)/lib/librte_pmd_ixgbe/ixgbe

 #
@@ -47,31 +47,6 @@

 #include "../ixgbe_logs.h"

-/* Remove some compiler warnings for the files in this dir */
-#ifdef __INTEL_COMPILER
-#pragma warning(disable:2259) /* Conversion may lose significant bits */
-#pragma warning(disable:869) /* Parameter was never referenced */
-#pragma warning(disable:181) /* Arg incompatible with format string */
-#pragma warning(disable:1419) /* External declaration in primary source file */
-#pragma warning(disable:111) /* Statement is unreachable */
-#pragma warning(disable:981) /* Operands are evaluated in unspecified order */
-#pragma warning(disable:593) /* Variable was set but never used */
-#pragma warning(disable:174) /* expression has no effect */
-#else
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#pragma GCC diagnostic ignored "-Wformat"
-#pragma GCC diagnostic ignored "-Wuninitialized"
-#pragma GCC diagnostic ignored "-Wstrict-aliasing"
-#pragma GCC diagnostic ignored "-Wunused-value"
-#pragma GCC diagnostic ignored "-Wformat-extra-args"
-#if (((__GNUC__) >= 4) && ((__GNUC_MINOR__) >= 6))
-#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
-#endif
-#if (((__GNUC__) >= 4) && ((__GNUC_MINOR__) >= 7))
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
-#endif
-
 #define ASSERT(x) if(!(x)) rte_panic("IXGBE: x")

 #define DELAY(x) rte_delay_us(x)
@@ -414,12 +414,12 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
     PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
             (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);

-    n = queue_id / NB_QMAP_FIELDS_PER_QSM_REG;
+    n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
     if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
         PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
         return -EIO;
     }
-    offset = queue_id % NB_QMAP_FIELDS_PER_QSM_REG;
+    offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

     /* Now clear any previous stat_idx set */
     clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
@@ -478,16 +478,18 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
 {
     uint8_t i;
     struct ixgbe_dcb_tc_config *tc;
-    int dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+    uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

     dcb_config->num_tcs.pg_tcs = dcb_max_tc;
     dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
     for (i = 0; i < dcb_max_tc; i++) {
         tc = &dcb_config->tc_config[i];
         tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
-        tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100/dcb_max_tc + (i & 1);
+        tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+                (uint8_t)(100/dcb_max_tc + (i & 1));
         tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
-        tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100/dcb_max_tc + (i & 1);
+        tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+                (uint8_t)(100/dcb_max_tc + (i & 1));
         tc->pfc = ixgbe_dcb_pfc_disabled;
     }

@@ -1326,12 +1328,12 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
     hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);

     if (hw->mac.type != ixgbe_mac_82598EB) {
-        hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
-            ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
-        hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
-            ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
-        hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
-            ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
+        hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+        hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
+        hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+        hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
+        hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+        hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
         hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
         hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
     } else {
@@ -1506,8 +1508,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

-    dev_info->max_rx_queues = hw->mac.max_rx_queues;
-    dev_info->max_tx_queues = hw->mac.max_tx_queues;
+    dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+    dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
     dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
     dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
     dev_info->max_mac_addrs = hw->mac.num_rar_entries;
@@ -1792,7 +1794,8 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
  * void
  */
 static void
-ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param)
+ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+                void *param)
 {
     int64_t timeout;
     struct rte_eth_link link;
@@ -2297,7 +2300,8 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
     struct ixgbe_hw *hw =
         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-    uint32_t i, on = 0;
+    uint16_t i;
+    int on = 0;

     /* VF function only support hw strip feature, others are not support */
     if(mask & ETH_VLAN_STRIP_MASK){
@@ -2307,4 +2311,3 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
             ixgbevf_vlan_strip_queue_set(dev,i,on);
     }
 }
-
@@ -501,8 +501,8 @@ ixgbe_fdir_update_signature_filter(struct rte_eth_dev *dev,
  * that it can be used for removing signature and perfect filters.
  */
 static s32
-fdir_erase_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input,
-            uint32_t fdirhash)
+fdir_erase_filter_82599(struct ixgbe_hw *hw,
+    __rte_unused union ixgbe_atr_input *input, uint32_t fdirhash)
 {
     u32 fdircmd = 0;
     u32 retry_count;
|
@ -283,10 +283,10 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* buffers were freed, update counters */
|
/* buffers were freed, update counters */
|
||||||
txq->nb_tx_free += txq->tx_rs_thresh;
|
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
|
||||||
txq->tx_next_dd += txq->tx_rs_thresh;
|
txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
|
||||||
if (txq->tx_next_dd >= txq->nb_tx_desc)
|
if (txq->tx_next_dd >= txq->nb_tx_desc)
|
||||||
txq->tx_next_dd = txq->tx_rs_thresh - 1;
|
txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
|
||||||
|
|
||||||
return txq->tx_rs_thresh;
|
return txq->tx_rs_thresh;
|
||||||
}
|
}
|
||||||
@@ -397,12 +397,12 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         ixgbe_tx_free_bufs(txq);

     /* Only use descriptors that are available */
-    nb_pkts = RTE_MIN(txq->nb_tx_free, nb_pkts);
+    nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
     if (unlikely(nb_pkts == 0))
         return 0;

     /* Use exactly nb_pkts descriptors */
-    txq->nb_tx_free -= nb_pkts;
+    txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

     /*
      * At this point, we know there are enough descriptors in the
@@ -417,7 +417,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
      * the processing looks just like the "bottom" part anyway...
      */
     if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
-        n = txq->nb_tx_desc - txq->tx_tail;
+        n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
         ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);

         /*
@@ -427,14 +427,14 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
          */
         tx_r[txq->tx_next_rs].read.cmd_type_len |=
             rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
-        txq->tx_next_rs = txq->tx_rs_thresh - 1;
+        txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

         txq->tx_tail = 0;
     }

     /* Fill H/W descriptor ring with mbuf data */
-    ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, nb_pkts - n);
-    txq->tx_tail += (nb_pkts - n);
+    ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+    txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));

     /*
      * Determine if RS bit should be set
* Determine if RS bit should be set
|
||||||
@ -446,9 +446,10 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
|
|||||||
if (txq->tx_tail > txq->tx_next_rs) {
|
if (txq->tx_tail > txq->tx_next_rs) {
|
||||||
tx_r[txq->tx_next_rs].read.cmd_type_len |=
|
tx_r[txq->tx_next_rs].read.cmd_type_len |=
|
||||||
rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
|
rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
|
||||||
txq->tx_next_rs += txq->tx_rs_thresh;
|
txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
|
||||||
|
txq->tx_rs_thresh);
|
||||||
if (txq->tx_next_rs >= txq->nb_tx_desc)
|
if (txq->tx_next_rs >= txq->nb_tx_desc)
|
||||||
txq->tx_next_rs = txq->tx_rs_thresh - 1;
|
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -479,10 +480,10 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
     nb_tx = 0;
     while (nb_pkts) {
         uint16_t ret, n;
-        n = RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
+        n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
         ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
-        nb_tx += ret;
-        nb_pkts -= ret;
+        nb_tx = (uint16_t)(nb_tx + ret);
+        nb_pkts = (uint16_t)(nb_pkts - ret);
         if (ret < n)
             break;
     }
@@ -617,9 +618,9 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
     uint16_t nb_tx_to_clean;

     /* Determine the last descriptor needing to be cleaned */
-    desc_to_clean_to = last_desc_cleaned + txq->tx_rs_thresh;
+    desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
     if (desc_to_clean_to >= nb_tx_desc)
-        desc_to_clean_to = desc_to_clean_to - nb_tx_desc;
+        desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);

     /* Check to make sure the last descriptor to clean is done */
     desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
@@ -636,10 +637,11 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)

     /* Figure out how many descriptors will be cleaned */
     if (last_desc_cleaned > desc_to_clean_to)
-        nb_tx_to_clean = ((nb_tx_desc - last_desc_cleaned) +
+        nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
                     desc_to_clean_to);
     else
-        nb_tx_to_clean = desc_to_clean_to - last_desc_cleaned;
+        nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+                    last_desc_cleaned);

     PMD_TX_FREE_LOG(DEBUG,
             "Cleaning %4u TX descriptors: %4u to %4u "
@@ -657,7 +659,7 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)

     /* Update the txq to reflect the last descriptor that was cleaned */
     txq->last_desc_cleaned = desc_to_clean_to;
-    txq->nb_tx_free += nb_tx_to_clean;
+    txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);

     /* No Error */
     return (0);
@@ -716,7 +718,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;

         /* If hardware offload required */
-        tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;
+        tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
         if (tx_ol_req) {
             /* If new context need be built or reuse the exist ctx. */
             ctx = what_advctx_update(txq, tx_ol_req,
||||||
@ -731,7 +733,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
|
|||||||
* This will always be the number of segments + the number of
|
* This will always be the number of segments + the number of
|
||||||
* Context descriptors required to transmit the packet
|
* Context descriptors required to transmit the packet
|
||||||
*/
|
*/
|
||||||
nb_used = tx_pkt->pkt.nb_segs + new_ctx;
|
nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The number of descriptors that must be allocated for a
|
* The number of descriptors that must be allocated for a
|
||||||
@@ -909,8 +911,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
          * The last packet data descriptor needs End Of Packet (EOP)
          */
         cmd_type_len |= IXGBE_TXD_CMD_EOP;
-        txq->nb_tx_used += nb_used;
-        txq->nb_tx_free -= nb_used;
+        txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
+        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);

         /* Set RS bit only on threshold packets' last descriptor */
         if (txq->nb_tx_used >= txq->tx_rs_thresh) {
@ -979,7 +981,7 @@ rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
|
|||||||
ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
|
ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
return (pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]);
|
return (uint16_t)(pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline uint16_t
|
static inline uint16_t
|
||||||
@ -992,11 +994,12 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
|
|||||||
* Do not check whether L3/L4 rx checksum done by NIC or not,
|
* Do not check whether L3/L4 rx checksum done by NIC or not,
|
||||||
* That can be found from rte_eth_rxmode.hw_ip_checksum flag
|
* That can be found from rte_eth_rxmode.hw_ip_checksum flag
|
||||||
*/
|
*/
|
||||||
pkt_flags = (uint16_t) (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
|
pkt_flags = (uint16_t)((rx_status & IXGBE_RXD_STAT_VP) ?
|
||||||
|
PKT_RX_VLAN_PKT : 0);
|
||||||
|
|
||||||
#ifdef RTE_LIBRTE_IEEE1588
|
#ifdef RTE_LIBRTE_IEEE1588
|
||||||
if (rx_status & IXGBE_RXD_STAT_TMST)
|
if (rx_status & IXGBE_RXD_STAT_TMST)
|
||||||
pkt_flags = (pkt_flags | PKT_RX_IEEE1588_TMST);
|
pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
|
||||||
#endif
|
#endif
|
||||||
return pkt_flags;
|
return pkt_flags;
|
||||||
}
|
}
|
||||||
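
Note: the rx_desc_status_to_pkt_flags change above is more than a style fix. A cast binds tighter than ?:, so the old line applied (uint16_t) only to the condition while the assigned value kept its int type. A sketch of the two parses; the macro values are illustrative stand-ins, not the driver's:

    #include <stdint.h>

    #define STAT_VP   0x08u    /* stand-in for IXGBE_RXD_STAT_VP */
    #define FLAG_VLAN 0x0001u  /* stand-in for PKT_RX_VLAN_PKT */

    uint16_t vlan_flag(uint32_t rx_status)
    {
            /* old form parsed as:
             *   ((uint16_t)(rx_status & STAT_VP)) ? FLAG_VLAN : 0
             * so only the condition was narrowed, not the result */

            /* new form narrows the whole conditional expression */
            return (uint16_t)((rx_status & STAT_VP) ? FLAG_VLAN : 0);
    }
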
@@ -1069,7 +1072,8 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
 		/* Translate descriptor info to mbuf format */
 		for (j = 0; j < nb_dd; ++j) {
 			mb = rxep[j].mbuf;
-			pkt_len = rxdp[j].wb.upper.length - rxq->crc_len;
+			pkt_len = (uint16_t)(rxdp[j].wb.upper.length -
+							rxq->crc_len);
 			mb->pkt.data_len = pkt_len;
 			mb->pkt.pkt_len = pkt_len;
 			mb->pkt.vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
@@ -1079,8 +1083,10 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
 			mb->ol_flags = rx_desc_hlen_type_rss_to_pkt_flags(
 					rxdp[j].wb.lower.lo_dword.data);
 			/* reuse status field from scan list */
-			mb->ol_flags |= rx_desc_status_to_pkt_flags(s[j]);
-			mb->ol_flags |= rx_desc_error_to_pkt_flags(s[j]);
+			mb->ol_flags = (uint16_t)(mb->ol_flags |
+					rx_desc_status_to_pkt_flags(s[j]));
+			mb->ol_flags = (uint16_t)(mb->ol_flags |
+					rx_desc_error_to_pkt_flags(s[j]));
 		}
 
 		/* Move mbuf pointers from the S/W ring to the stage */
@@ -1094,8 +1100,10 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
 	}
 
 	/* clear software ring entries so we can cleanup correctly */
-	for (i = 0; i < nb_rx; ++i)
+	for (i = 0; i < nb_rx; ++i) {
 		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+	}
 
 
 	return nb_rx;
 }
@@ -1111,7 +1119,8 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
 	int diag, i;
 
 	/* allocate buffers in bulk directly into the S/W ring */
-	alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
+	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
+						(rxq->rx_free_thresh - 1));
 	rxep = &rxq->sw_ring[alloc_idx];
 	diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
 				    rxq->rx_free_thresh);
@@ -1140,9 +1149,10 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
 	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger);
 
 	/* update state of internal queue structure */
-	rxq->rx_free_trigger += rxq->rx_free_thresh;
+	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_trigger +
+						rxq->rx_free_thresh);
 	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
-		rxq->rx_free_trigger = (rxq->rx_free_thresh - 1);
+		rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
 
 	/* no errors */
 	return 0;
@@ -1156,15 +1166,15 @@ ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	int i;
 
 	/* how many packets are ready to return? */
-	nb_pkts = RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
 
 	/* copy mbuf pointers to the application's packet list */
 	for (i = 0; i < nb_pkts; ++i)
 		rx_pkts[i] = stage[i];
 
 	/* update internal queue state */
-	rxq->rx_nb_avail -= nb_pkts;
-	rxq->rx_next_avail += nb_pkts;
+	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
 
 	return nb_pkts;
 }
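
Note: the (uint16_t) in front of RTE_MIN above is needed even though both arguments are already uint16_t: whatever the macro expands to, its inner ?: promotes the operands to int, so the whole expression is int-typed. A sketch with a plain min macro standing in for RTE_MIN, whose exact definition is not shown here:

    #include <stdint.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))  /* assumed stand-in */

    uint16_t clamp_burst(uint16_t nb_pkts, uint16_t rx_nb_avail)
    {
            /* MIN(...) is int-typed after promotion; narrow it back */
            return (uint16_t)MIN(nb_pkts, rx_nb_avail);
    }
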
@@ -1181,12 +1191,12 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
 
 	/* Scan the H/W ring for packets to receive */
-	nb_rx = ixgbe_rx_scan_hw_ring(rxq);
+	nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
 
 	/* update internal queue state */
 	rxq->rx_next_avail = 0;
 	rxq->rx_nb_avail = nb_rx;
-	rxq->rx_tail += nb_rx;
+	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
 
 	/* if required, allocate new buffers to replenish descriptors */
 	if (rxq->rx_tail > rxq->rx_free_trigger) {
@@ -1204,7 +1214,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			 * allocate new buffers to replenish the old ones.
 			 */
 			rxq->rx_nb_avail = 0;
-			rxq->rx_tail -= nb_rx;
+			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
 			for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
 				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
 
@@ -1239,10 +1249,10 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_rx = 0;
 	while (nb_pkts) {
 		uint16_t ret, n;
-		n = RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
+		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
 		ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
-		nb_rx += ret;
-		nb_pkts -= ret;
+		nb_rx = (uint16_t)(nb_rx + ret);
+		nb_pkts = (uint16_t)(nb_pkts - ret);
 		if (ret < n)
 			break;
 	}
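
Note: ixgbe_recv_pkts_bulk_alloc above also documents the calling convention for the staged RX path: requests larger than RTE_PMD_IXGBE_RX_MAX_BURST are split into bursts, and the loop stops early when a burst comes back short. A hedged skeleton of that control flow; the helper names and the burst size are hypothetical:

    #include <stdint.h>

    #define MAX_BURST 32  /* stand-in for RTE_PMD_IXGBE_RX_MAX_BURST */

    typedef uint16_t (*recv_burst_fn)(void *q, void **pkts, uint16_t n);

    uint16_t recv_all(recv_burst_fn rx_burst, void *q,
                      void **pkts, uint16_t nb_pkts)
    {
            uint16_t nb_rx = 0;

            while (nb_pkts) {
                    uint16_t n = (uint16_t)(nb_pkts < MAX_BURST ?
                                            nb_pkts : MAX_BURST);
                    uint16_t ret = rx_burst(q, &pkts[nb_rx], n);

                    nb_rx = (uint16_t)(nb_rx + ret);
                    nb_pkts = (uint16_t)(nb_pkts - ret);
                    if (ret < n)    /* ring drained; stop asking */
                            break;
            }
            return nb_rx;
    }
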
@@ -1389,8 +1399,10 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				rte_le_to_cpu_16(rxd.wb.upper.vlan);
 
 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-		pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
-		pkt_flags = (pkt_flags | rx_desc_error_to_pkt_flags(staterr));
+		pkt_flags = (uint16_t)(pkt_flags |
+				rx_desc_status_to_pkt_flags(staterr));
+		pkt_flags = (uint16_t)(pkt_flags |
+				rx_desc_error_to_pkt_flags(staterr));
 		rxm->ol_flags = pkt_flags;
 
 		if (likely(pkt_flags & PKT_RX_RSS_HASH))
@@ -1632,10 +1644,10 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				rte_le_to_cpu_16(rxd.wb.upper.vlan);
 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-		pkt_flags = (pkt_flags |
+		pkt_flags = (uint16_t)(pkt_flags |
 				rx_desc_status_to_pkt_flags(staterr));
-		pkt_flags = (pkt_flags |
+		pkt_flags = (uint16_t)(pkt_flags |
 				rx_desc_error_to_pkt_flags(staterr));
 		first_seg->ol_flags = pkt_flags;
 
 		if (likely(pkt_flags & PKT_RX_RSS_HASH))
@@ -1802,8 +1814,8 @@ ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
 		prev = i;
 	}
 
-	txq->tx_next_dd = txq->tx_rs_thresh - 1;
-	txq->tx_next_rs = txq->tx_rs_thresh - 1;
+	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 	txq->tx_tail = 0;
 	txq->nb_tx_used = 0;
@@ -1864,43 +1876,38 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	 * H/W race condition, hence the maximum threshold constraints.
 	 * When set to zero use default values.
 	 */
-	tx_rs_thresh = (tx_conf->tx_rs_thresh) ?
-			tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH;
-	tx_free_thresh = (tx_conf->tx_free_thresh) ?
-			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
+	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+			tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
+	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
 	if (tx_rs_thresh >= (nb_desc - 2)) {
-		RTE_LOG(ERR, PMD,
-			"tx_rs_thresh must be less than the "
-			"number of TX descriptors minus 2. "
-			"(tx_rs_thresh=%u port=%d queue=%d)\n",
-			tx_rs_thresh, dev->data->port_id, queue_idx);
+		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the number "
			"of TX descriptors minus 2. (tx_rs_thresh=%u port=%d "
+			"queue=%d)\n", (unsigned int)tx_rs_thresh,
+			(int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 	if (tx_free_thresh >= (nb_desc - 3)) {
-		RTE_LOG(ERR, PMD,
-			"tx_rs_thresh must be less than the "
-			"tx_free_thresh must be less than the "
-			"number of TX descriptors minus 3. "
-			"(tx_free_thresh=%u port=%d queue=%d)\n",
-			tx_free_thresh, dev->data->port_id, queue_idx);
+		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the "
+			"tx_free_thresh must be less than the number of TX "
+			"descriptors minus 3. (tx_free_thresh=%u port=%d "
+			"queue=%d)\n", (unsigned int)tx_free_thresh,
+			(int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 	if (tx_rs_thresh > tx_free_thresh) {
-		RTE_LOG(ERR, PMD,
-			"tx_rs_thresh must be less than or equal to "
-			"tx_free_thresh. "
-			"(tx_free_thresh=%u tx_rs_thresh=%u "
-			"port=%d queue=%d)\n",
-			tx_free_thresh, tx_rs_thresh,
-			dev->data->port_id, queue_idx);
+		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or equal to "
+			"tx_free_thresh. (tx_free_thresh=%u tx_rs_thresh=%u "
+			"port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
+			(unsigned int)tx_rs_thresh, (int)dev->data->port_id,
+			(int)queue_idx);
 		return -(EINVAL);
 	}
 	if ((nb_desc % tx_rs_thresh) != 0) {
-		RTE_LOG(ERR, PMD,
-			"tx_rs_thresh must be a divisor of the"
-			"number of TX descriptors. "
-			"(tx_rs_thresh=%u port=%d queue=%d)\n",
-			tx_rs_thresh, dev->data->port_id, queue_idx);
+		RTE_LOG(ERR, PMD, "tx_rs_thresh must be a divisor of the "
+			"number of TX descriptors. (tx_rs_thresh=%u port=%d "
+			"queue=%d)\n", (unsigned int)tx_rs_thresh,
+			(int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 
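
Note: the RTE_LOG rewrites above cast every integer argument to the exact type its conversion specifier names. In a variadic call, uint8_t and uint16_t arguments are promoted to int, which -Wformat then reports against %u. A minimal sketch of the warning-free form; printf stands in for RTE_LOG here:

    #include <stdio.h>
    #include <stdint.h>

    void report_thresh(uint16_t tx_rs_thresh, uint8_t port_id,
                       uint16_t queue_idx)
    {
            /* promoted arguments are cast to match %u and %d exactly */
            printf("tx_rs_thresh=%u port=%d queue=%d\n",
                   (unsigned int)tx_rs_thresh, (int)port_id,
                   (int)queue_idx);
    }
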
@@ -1911,12 +1918,10 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	 * accumulates WTHRESH descriptors.
 	 */
 	if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
-		RTE_LOG(ERR, PMD,
-			"TX WTHRESH must be set to 0 if "
-			"tx_rs_thresh is greater than 1. "
-			"(tx_rs_thresh=%u port=%d queue=%d)\n",
-			tx_rs_thresh,
-			dev->data->port_id, queue_idx);
+		RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
+			"tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
+			"port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
+			(int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 
@@ -2039,7 +2044,11 @@ ixgbe_dev_rx_queue_release(void *rxq)
  * function must be used.
  */
 static inline int
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
+#else
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
+#endif
 {
 	int ret = 0;
 
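
Note: the #ifdef around the function header above exists because, when RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is compiled out, the body never reads rxq and -Wunused-parameter would fire. A sketch of the same trick under assumed names; unused_attr stands in for __rte_unused, which wraps __attribute__((unused)):

    struct rx_queue;  /* opaque stand-in for struct igb_rx_queue */

    #define unused_attr __attribute__((unused))

    #ifdef USE_BULK_ALLOC
    static int check_preconditions(struct rx_queue *rxq)
    #else
    static int check_preconditions(unused_attr struct rx_queue *rxq)
    #endif
    {
            int ret = 0;
    #ifdef USE_BULK_ALLOC
            /* the bulk-alloc build would inspect rxq's thresholds here */
            (void)rxq;
    #endif
            return ret;
    }
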
@@ -2089,7 +2098,7 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
 		/* zero out extra memory */
-		len = rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST;
+		len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
 	else
 #endif
 		/* do not zero out extra memory */
@@ -2116,7 +2125,7 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
 
 	rxq->rx_nb_avail = 0;
 	rxq->rx_next_avail = 0;
-	rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
+	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
 #endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
 	rxq->rx_tail = 0;
 	rxq->nb_rx_hold = 0;
@@ -2166,8 +2175,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
-				  ETHER_CRC_LEN);
+	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
+				  0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 
 	/*
@@ -2199,7 +2208,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	 * function does not access an invalid memory region.
 	 */
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-	len = nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST;
+	len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
 #else
 	len = nb_desc;
 #endif
@@ -2583,7 +2592,8 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_queue[i];
 		tc = &dcb_config->tc_config[j];
-		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = (1 << j);
+		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
+						(uint8_t)(1 << j);
 	}
 }
 
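
Note: (1 << j) has type int no matter how small j is, so storing it into the uint8_t up_to_tc_bitmap field warns without the cast added above. A minimal sketch:

    #include <stdint.h>

    void set_tc_bit(uint8_t *up_to_tc_bitmap, uint8_t j)
    {
            /* the shift happens in int; narrow explicitly (j < 8 is
             * assumed here, matching the 8-entry user-priority table) */
            *up_to_tc_bitmap = (uint8_t)(1 << j);
    }
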
@@ -2610,46 +2620,51 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_queue[i];
 		tc = &dcb_config->tc_config[j];
-		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (1 << j);
+		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
+						(uint8_t)(1 << j);
 	}
 	return;
 }
 
 static void
-ixgbe_dcb_rx_config(struct rte_eth_dev *dev,struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
+		struct ixgbe_dcb_config *dcb_config)
 {
 	struct rte_eth_dcb_rx_conf *rx_conf =
 			&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
 	struct ixgbe_dcb_tc_config *tc;
 	uint8_t i,j;
 
-	dcb_config->num_tcs.pg_tcs = rx_conf->nb_tcs;
-	dcb_config->num_tcs.pfc_tcs = rx_conf->nb_tcs;
+	dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
 
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_queue[i];
 		tc = &dcb_config->tc_config[j];
-		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = (1 << j);
+		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
+						(uint8_t)(1 << j);
 	}
 }
 
 static void
-ixgbe_dcb_tx_config(struct rte_eth_dev *dev,struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
+		struct ixgbe_dcb_config *dcb_config)
 {
 	struct rte_eth_dcb_tx_conf *tx_conf =
 			&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
 	struct ixgbe_dcb_tc_config *tc;
 	uint8_t i,j;
 
-	dcb_config->num_tcs.pg_tcs = tx_conf->nb_tcs;
-	dcb_config->num_tcs.pfc_tcs = tx_conf->nb_tcs;
+	dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
 
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_queue[i];
 		tc = &dcb_config->tc_config[j];
-		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (1 << j);
+		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
+						(uint8_t)(1 << j);
 	}
 }
 
@@ -2842,7 +2857,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 			uint8_t j = 4;
 			uint8_t mask = 0xFF;
 			for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
-				mask &= ~ (1 << map[i]);
+				mask = (uint8_t)(mask & (~ (1 << map[i])));
 			for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
 				if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
 					map[j++] = i;
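
Note: the mask rewrite above has the same cause plus one twist: '~' applied to the promoted int sets all the high bits, so the & result only fits back into uint8_t after an explicit narrowing. A sketch:

    #include <stdint.h>

    uint8_t clear_mapped_bits(uint8_t mask, const uint8_t *map, int n)
    {
            int i;

            /* each (1 << map[i]) and its complement are int-typed;
             * the cast makes the store back into uint8_t explicit */
            for (i = 0; i < n; i++)
                    mask = (uint8_t)(mask & (~(1 << map[i])));
            return mask;
    }
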
@@ -2851,8 +2866,10 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		/* Re-configure 4 TCs BW */
 		for (i = 0; i < nb_tcs; i++) {
 			tc = &dcb_config->tc_config[i];
-			tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100 / nb_tcs;
-			tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100 / nb_tcs;
+			tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+						(uint8_t)(100 / nb_tcs);
+			tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+						(uint8_t)(100 / nb_tcs);
 		}
 		for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
 			tc = &dcb_config->tc_config[i];