net/mlx5: use dynamic logging

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
parent 0f99970b4a
commit a170a30d22
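
This commit replaces the PMD's compile-time DEBUG()/INFO()/WARN()/ERROR() macros with a DRV_LOG() wrapper bound to a log type registered with the EAL at start-up, so driver verbosity becomes a run-time choice. The DRV_LOG() definition itself is not part of this diff; the sketch below is a plausible reconstruction from the rte_log() usage the hunks imply — the "net_mlx5: " prefix and trailing newline are assumptions, not copied from the source tree.

    /* Hedged sketch only: the real DRV_LOG() lives in a driver header
     * outside this diff. It forwards to the EAL logging API using the
     * dynamically registered mlx5_logtype declared below. */
    #include <rte_log.h>

    extern int mlx5_logtype;

    #define DRV_LOG(level, fmt, ...) \
            rte_log(RTE_LOG_ ## level, mlx5_logtype, \
                    "net_mlx5: " fmt "\n", ##__VA_ARGS__)

With such a wrapper, a call like DRV_LOG(WARNING, "%s: unknown parameter", key) compiles down to rte_log(RTE_LOG_WARNING, mlx5_logtype, ...), which the EAL filters against the level stored for "pmd.net.mlx5".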
@@ -77,6 +77,9 @@
 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
 #endif
 
+/** Driver-specific log messages type. */
+int mlx5_logtype;
+
 /**
  * Retrieve integer value from environment variable.
  *
@@ -165,9 +168,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 unsigned int i;
 int ret;
 
-DEBUG("port %u closing device \"%s\"",
-dev->data->port_id,
-((priv->ctx != NULL) ? priv->ctx->device->name : ""));
+DRV_LOG(DEBUG, "port %u closing device \"%s\"",
+dev->data->port_id,
+((priv->ctx != NULL) ? priv->ctx->device->name : ""));
 /* In case mlx5_dev_stop() has not been called. */
 mlx5_dev_interrupt_handler_uninstall(dev);
 mlx5_traffic_disable(dev);
@@ -204,35 +207,36 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 mlx5_socket_uninit(dev);
 ret = mlx5_hrxq_ibv_verify(dev);
 if (ret)
-WARN("port %u some hash Rx queue still remain",
-dev->data->port_id);
+DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
+dev->data->port_id);
 ret = mlx5_ind_table_ibv_verify(dev);
 if (ret)
-WARN("port %u some indirection table still remain",
-dev->data->port_id);
+DRV_LOG(WARNING, "port %u some indirection table still remain",
+dev->data->port_id);
 ret = mlx5_rxq_ibv_verify(dev);
 if (ret)
-WARN("port %u some Verbs Rx queue still remain",
-dev->data->port_id);
+DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain",
+dev->data->port_id);
 ret = mlx5_rxq_verify(dev);
 if (ret)
-WARN("port %u some Rx queues still remain",
-dev->data->port_id);
+DRV_LOG(WARNING, "port %u some Rx queues still remain",
+dev->data->port_id);
 ret = mlx5_txq_ibv_verify(dev);
 if (ret)
-WARN("port %u some Verbs Tx queue still remain",
-dev->data->port_id);
+DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
+dev->data->port_id);
 ret = mlx5_txq_verify(dev);
 if (ret)
-WARN("port %u some Tx queues still remain",
-dev->data->port_id);
+DRV_LOG(WARNING, "port %u some Tx queues still remain",
+dev->data->port_id);
 ret = mlx5_flow_verify(dev);
 if (ret)
-WARN("port %u some flows still remain", dev->data->port_id);
+DRV_LOG(WARNING, "port %u some flows still remain",
+dev->data->port_id);
 ret = mlx5_mr_verify(dev);
 if (ret)
-WARN("port %u some memory region still remain",
-dev->data->port_id);
+DRV_LOG(WARNING, "port %u some memory region still remain",
+dev->data->port_id);
 memset(priv, 0, sizeof(*priv));
 }
 
@@ -384,7 +388,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 tmp = strtoul(val, NULL, 0);
 if (errno) {
 rte_errno = errno;
-WARN("%s: \"%s\" is not a valid integer", key, val);
+DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
 return -rte_errno;
 }
 if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
@@ -404,7 +408,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
 config->rx_vec_en = !!tmp;
 } else {
-WARN("%s: unknown parameter", key);
+DRV_LOG(WARNING, "%s: unknown parameter", key);
 rte_errno = EINVAL;
 return -rte_errno;
 }
@@ -508,17 +512,18 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
 addr = mmap(addr, MLX5_UAR_SIZE,
 PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 if (addr == MAP_FAILED) {
-ERROR("port %u failed to reserve UAR address space, please"
-" adjust MLX5_UAR_SIZE or try --base-virtaddr",
-dev->data->port_id);
+DRV_LOG(ERR,
+"port %u failed to reserve UAR address space, please"
+" adjust MLX5_UAR_SIZE or try --base-virtaddr",
+dev->data->port_id);
 rte_errno = ENOMEM;
 return -rte_errno;
 }
 /* Accept either same addr or a new addr returned from mmap if target
 * range occupied.
 */
-INFO("port %u reserved UAR address space: %p", dev->data->port_id,
-addr);
+DRV_LOG(INFO, "port %u reserved UAR address space: %p",
+dev->data->port_id, addr);
 priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
 uar_base = addr; /* process local, don't reserve again. */
 return 0;
@@ -549,21 +554,23 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev)
 addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
 PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 if (addr == MAP_FAILED) {
-ERROR("port %u UAR mmap failed: %p size: %llu",
-dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
+dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
 rte_errno = ENXIO;
 return -rte_errno;
 }
 if (priv->uar_base != addr) {
-ERROR("port %u UAR address %p size %llu occupied, please adjust "
-"MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr",
-dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+DRV_LOG(ERR,
+"port %u UAR address %p size %llu occupied, please"
+" adjust MLX5_UAR_OFFSET or try EAL parameter"
+" --base-virtaddr",
+dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
 rte_errno = ENXIO;
 return -rte_errno;
 }
 uar_base = addr; /* process local, don't reserve again */
-INFO("port %u reserved UAR address space: %p", dev->data->port_id,
-addr);
+DRV_LOG(INFO, "port %u reserved UAR address space: %p",
+dev->data->port_id, addr);
 return 0;
 }
@@ -604,11 +611,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 /* Get mlx5_dev[] index. */
 idx = mlx5_dev_idx(&pci_dev->addr);
 if (idx == -1) {
-ERROR("this driver cannot support any more adapters");
+DRV_LOG(ERR, "this driver cannot support any more adapters");
 err = ENOMEM;
 goto error;
 }
-DEBUG("using driver device index %d", idx);
+DRV_LOG(DEBUG, "using driver device index %d", idx);
 /* Save PCI address. */
 mlx5_dev[idx].pci_addr = pci_dev->addr;
 list = mlx5_glue->get_device_list(&i);
@@ -616,7 +623,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 assert(errno);
 err = errno;
 if (errno == ENOSYS)
-ERROR("cannot list devices, is ib_uverbs loaded?");
+DRV_LOG(ERR,
+"cannot list devices, is ib_uverbs loaded?");
 goto error;
 }
 assert(i >= 0);
@@ -628,7 +636,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 struct rte_pci_addr pci_addr;
 
 --i;
-DEBUG("checking device \"%s\"", list[i]->name);
+DRV_LOG(DEBUG, "checking device \"%s\"", list[i]->name);
 if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
 continue;
 if ((pci_dev->addr.domain != pci_addr.domain) ||
@@ -636,8 +644,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 (pci_dev->addr.devid != pci_addr.devid) ||
 (pci_dev->addr.function != pci_addr.function))
 continue;
-INFO("PCI information matches, using device \"%s\"",
-list[i]->name);
+DRV_LOG(INFO, "PCI information matches, using device \"%s\"",
+list[i]->name);
 attr_ctx = mlx5_glue->open_device(list[i]);
 rte_errno = errno;
 err = rte_errno;
@@ -647,16 +655,18 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 mlx5_glue->free_device_list(list);
 switch (err) {
 case 0:
-ERROR("cannot access device, is mlx5_ib loaded?");
+DRV_LOG(ERR,
+"cannot access device, is mlx5_ib loaded?");
 err = ENODEV;
 goto error;
 case EINVAL:
-ERROR("cannot use device, are drivers up to date?");
+DRV_LOG(ERR,
+"cannot use device, are drivers up to date?");
 goto error;
 }
 }
 ibv_dev = list[i];
-DEBUG("device opened");
+DRV_LOG(DEBUG, "device opened");
 /*
 * Multi-packet send is supported by ConnectX-4 Lx PF as well
 * as all ConnectX-5 devices.
@@ -667,14 +677,14 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 mlx5_glue->dv_query_device(attr_ctx, &attrs_out);
 if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
 if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
-DEBUG("enhanced MPW is supported");
+DRV_LOG(DEBUG, "enhanced MPW is supported");
 mps = MLX5_MPW_ENHANCED;
 } else {
-DEBUG("MPW is supported");
+DRV_LOG(DEBUG, "MPW is supported");
 mps = MLX5_MPW;
 }
 } else {
-DEBUG("MPW isn't supported");
+DRV_LOG(DEBUG, "MPW isn't supported");
 mps = MLX5_MPW_DISABLED;
 }
 if (RTE_CACHE_LINE_SIZE == 128 &&
@@ -689,15 +699,18 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 (attrs_out.tunnel_offloads_caps &
 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
 }
-DEBUG("tunnel offloading is %ssupported", tunnel_en ? "" : "not ");
+DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
+tunnel_en ? "" : "not ");
 #else
-WARN("tunnel offloading disabled due to old OFED/rdma-core version");
+DRV_LOG(WARNING,
+"tunnel offloading disabled due to old OFED/rdma-core version");
 #endif
 if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) {
 err = errno;
 goto error;
 }
-INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
+DRV_LOG(INFO, "%u port(s) detected",
+device_attr.orig_attr.phys_port_cnt);
 for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
 char name[RTE_ETH_NAME_MAX_LEN];
 int len;
@@ -732,7 +745,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
 eth_dev = rte_eth_dev_attach_secondary(name);
 if (eth_dev == NULL) {
-ERROR("can not attach rte ethdev");
+DRV_LOG(ERR, "can not attach rte ethdev");
 rte_errno = ENOMEM;
 err = rte_errno;
 goto error;
@@ -761,7 +774,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 mlx5_select_tx_function(eth_dev);
 continue;
 }
-DEBUG("using port %u (%08" PRIx32 ")", port, test);
+DRV_LOG(DEBUG, "using port %u (%08" PRIx32 ")", port, test);
 ctx = mlx5_glue->open_device(ibv_dev);
 if (ctx == NULL) {
 err = ENODEV;
@@ -771,23 +784,25 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 /* Check port status. */
 err = mlx5_glue->query_port(ctx, port, &port_attr);
 if (err) {
-ERROR("port query failed: %s", strerror(err));
+DRV_LOG(ERR, "port query failed: %s", strerror(err));
 goto port_error;
 }
 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
-ERROR("port %d is not configured in Ethernet mode",
-port);
+DRV_LOG(ERR,
+"port %d is not configured in Ethernet mode",
+port);
 err = EINVAL;
 goto port_error;
 }
 if (port_attr.state != IBV_PORT_ACTIVE)
-DEBUG("port %d is not active: \"%s\" (%d)",
-port, mlx5_glue->port_state_str(port_attr.state),
-port_attr.state);
+DRV_LOG(DEBUG, "port %d is not active: \"%s\" (%d)",
+port,
+mlx5_glue->port_state_str(port_attr.state),
+port_attr.state);
 /* Allocate protection domain. */
 pd = mlx5_glue->alloc_pd(ctx);
 if (pd == NULL) {
-ERROR("PD allocation failure");
+DRV_LOG(ERR, "PD allocation failure");
 err = ENOMEM;
 goto port_error;
 }
@@ -797,7 +812,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 sizeof(*priv),
 RTE_CACHE_LINE_SIZE);
 if (priv == NULL) {
-ERROR("priv allocation failure");
+DRV_LOG(ERR, "priv allocation failure");
 err = ENOMEM;
 goto port_error;
 }
@@ -810,25 +825,26 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 priv->mtu = ETHER_MTU;
 err = mlx5_args(&config, pci_dev->device.devargs);
 if (err) {
-ERROR("failed to process device arguments: %s",
-strerror(err));
+DRV_LOG(ERR, "failed to process device arguments: %s",
+strerror(err));
 goto port_error;
 }
 if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) {
-ERROR("ibv_query_device_ex() failed");
+DRV_LOG(ERR, "ibv_query_device_ex() failed");
 err = errno;
 goto port_error;
 }
 config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
 IBV_DEVICE_RAW_IP_CSUM);
-DEBUG("checksum offloading is %ssupported",
-(config.hw_csum ? "" : "not "));
+DRV_LOG(DEBUG, "checksum offloading is %ssupported",
+(config.hw_csum ? "" : "not "));
 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
 config.flow_counter_en = !!(device_attr.max_counter_sets);
 mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
-DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
-cs_desc.counter_type, cs_desc.num_of_cs,
-cs_desc.attributes);
+DRV_LOG(DEBUG,
+"counter type = %d, num of cs = %ld, attributes = %d",
+cs_desc.counter_type, cs_desc.num_of_cs,
+cs_desc.attributes);
 #endif
 config.ind_table_max_size =
 device_attr_ex.rss_caps.max_rwq_indirection_table_size;
@@ -837,23 +853,24 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 if (config.ind_table_max_size >
 (unsigned int)ETH_RSS_RETA_SIZE_512)
 config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
-DEBUG("maximum Rx indirection table size is %u",
-config.ind_table_max_size);
+DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
+config.ind_table_max_size);
 config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
-DEBUG("VLAN stripping is %ssupported",
-(config.hw_vlan_strip ? "" : "not "));
+DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
+(config.hw_vlan_strip ? "" : "not "));
 
 config.hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
 IBV_RAW_PACKET_CAP_SCATTER_FCS);
-DEBUG("FCS stripping configuration is %ssupported",
-(config.hw_fcs_strip ? "" : "not "));
+DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
+(config.hw_fcs_strip ? "" : "not "));
 
 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
 config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
 #endif
-DEBUG("hardware Rx end alignment padding is %ssupported",
-(config.hw_padding ? "" : "not "));
+DRV_LOG(DEBUG,
+"hardware Rx end alignment padding is %ssupported",
+(config.hw_padding ? "" : "not "));
 config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
 (device_attr_ex.tso_caps.supported_qpts &
 (1 << IBV_QPT_RAW_PACKET)));
@@ -861,21 +878,23 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 config.tso_max_payload_sz =
 device_attr_ex.tso_caps.max_tso;
 if (config.mps && !mps) {
-ERROR("multi-packet send not supported on this device"
-" (" MLX5_TXQ_MPW_EN ")");
+DRV_LOG(ERR,
+"multi-packet send not supported on this device"
+" (" MLX5_TXQ_MPW_EN ")");
 err = ENOTSUP;
 goto port_error;
 }
-INFO("%s MPS is %s",
-config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
-config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+DRV_LOG(INFO, "%s MPS is %s",
+config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
+config.mps != MLX5_MPW_DISABLED ? "enabled" :
+"disabled");
 if (config.cqe_comp && !cqe_comp) {
-WARN("Rx CQE compression isn't supported");
+DRV_LOG(WARNING, "Rx CQE compression isn't supported");
 config.cqe_comp = 0;
 }
 eth_dev = rte_eth_dev_allocate(name);
 if (eth_dev == NULL) {
-ERROR("can not allocate rte ethdev");
+DRV_LOG(ERR, "can not allocate rte ethdev");
 err = ENOMEM;
 goto port_error;
 }
@@ -890,34 +909,37 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 goto port_error;
 /* Configure the first MAC address by default. */
 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
-ERROR("port %u cannot get MAC address, is mlx5_en"
-" loaded? (errno: %s)", eth_dev->data->port_id,
-strerror(errno));
+DRV_LOG(ERR,
+"port %u cannot get MAC address, is mlx5_en"
+" loaded? (errno: %s)",
+eth_dev->data->port_id, strerror(errno));
 err = ENODEV;
 goto port_error;
 }
-INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
-eth_dev->data->port_id,
-mac.addr_bytes[0], mac.addr_bytes[1],
-mac.addr_bytes[2], mac.addr_bytes[3],
-mac.addr_bytes[4], mac.addr_bytes[5]);
+DRV_LOG(INFO,
+"port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
+eth_dev->data->port_id,
+mac.addr_bytes[0], mac.addr_bytes[1],
+mac.addr_bytes[2], mac.addr_bytes[3],
+mac.addr_bytes[4], mac.addr_bytes[5]);
 #ifndef NDEBUG
 {
 char ifname[IF_NAMESIZE];
 
 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
-DEBUG("port %u ifname is \"%s\"",
-eth_dev->data->port_id, ifname);
+DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
+eth_dev->data->port_id, ifname);
 else
-DEBUG("port %u ifname is unknown",
-eth_dev->data->port_id);
+DRV_LOG(DEBUG, "port %u ifname is unknown",
+eth_dev->data->port_id);
 }
 #endif
 /* Get actual MTU if possible. */
 err = mlx5_get_mtu(eth_dev, &priv->mtu);
 if (err)
 goto port_error;
-DEBUG("port %u MTU is %u", eth_dev->data->port_id, priv->mtu);
+DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
+priv->mtu);
 /*
 * Initialize burst functions to prevent crashes before link-up.
 */
@@ -938,8 +960,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
 (void *)((uintptr_t)&alctr));
 /* Bring Ethernet device up. */
-DEBUG("port %u forcing Ethernet interface up",
-eth_dev->data->port_id);
+DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
+eth_dev->data->port_id);
 mlx5_set_flags(eth_dev, ~IFF_UP, IFF_UP);
 /* Store device configuration on private structure. */
 priv->config = config;
@@ -1060,9 +1082,10 @@ mlx5_glue_path(char *buf, size_t size)
 goto error;
 return buf;
 error:
-ERROR("unable to append \"-glue\" to last component of"
-" RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
-" please re-configure DPDK");
+DRV_LOG(ERR,
+"unable to append \"-glue\" to last component of"
+" RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
+" please re-configure DPDK");
 return NULL;
 }
 
@@ -1117,7 +1140,8 @@ mlx5_glue_init(void)
 break;
 if (sizeof(name) != (size_t)ret + 1)
 continue;
-DEBUG("looking for rdma-core glue as \"%s\"", name);
+DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
+name);
 handle = dlopen(name, RTLD_LAZY);
 break;
 } while (1);
@@ -1129,7 +1153,7 @@ mlx5_glue_init(void)
 rte_errno = EINVAL;
 dlmsg = dlerror();
 if (dlmsg)
-WARN("cannot load glue library: %s", dlmsg);
+DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
 goto glue_error;
 }
 sym = dlsym(handle, "mlx5_glue");
@@ -1137,7 +1161,7 @@ mlx5_glue_init(void)
 rte_errno = EINVAL;
 dlmsg = dlerror();
 if (dlmsg)
-ERROR("cannot resolve glue symbol: %s", dlmsg);
+DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
 goto glue_error;
 }
 mlx5_glue = *sym;
@@ -1145,9 +1169,9 @@ mlx5_glue_init(void)
 glue_error:
 if (handle)
 dlclose(handle);
-WARN("cannot initialize PMD due to missing run-time"
-" dependency on rdma-core libraries (libibverbs,"
-" libmlx5)");
+DRV_LOG(WARNING,
+"cannot initialize PMD due to missing run-time dependency on"
+" rdma-core libraries (libibverbs, libmlx5)");
 return -rte_errno;
 }
 
@@ -1187,8 +1211,9 @@ rte_mlx5_pmd_init(void)
 }
 #endif
 if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
-ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required",
-mlx5_glue->version, MLX5_GLUE_VERSION);
+DRV_LOG(ERR,
+"rdma-core glue \"%s\" mismatch: \"%s\" is required",
+mlx5_glue->version, MLX5_GLUE_VERSION);
 return;
 }
 mlx5_glue->fork_init();
@@ -1198,3 +1223,11 @@ rte_mlx5_pmd_init(void)
 RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
 RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");
+
+/** Initialize driver log type. */
+RTE_INIT(vdev_netvsc_init_log)
+{
+mlx5_logtype = rte_log_register("pmd.net.mlx5");
+if (mlx5_logtype >= 0)
+rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
+}
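
The constructor above registers "pmd.net.mlx5" and caps it at NOTICE by default, which silences the DEBUG/INFO messages converted throughout this patch until a user opts in. Below is a minimal sketch of raising the level from application code via the same rte_log_set_level() call the hunk itself uses; the EAL also exposes this through its --log-level command-line option, whose exact syntax depends on the DPDK release.

    #include <rte_log.h>

    extern int mlx5_logtype; /* set by the RTE_INIT constructor above */

    /* Turn on full driver debug output at run time. */
    static void
    mlx5_logs_to_debug(void)
    {
            rte_log_set_level(mlx5_logtype, RTE_LOG_DEBUG);
    }
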
@@ -313,16 +313,18 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 int ret = 0;
 
 if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
-ERROR("port %u some Tx offloads are not supported "
-"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-dev->data->port_id, tx_offloads, supp_tx_offloads);
+DRV_LOG(ERR,
+"port %u some Tx offloads are not supported requested"
+" 0x%" PRIx64 " supported 0x%" PRIx64,
+dev->data->port_id, tx_offloads, supp_tx_offloads);
 rte_errno = ENOTSUP;
 return -rte_errno;
 }
 if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
-ERROR("port %u some Rx offloads are not supported "
-"requested 0x%" PRIx64 " supported 0x%" PRIx64,
-dev->data->port_id, rx_offloads, supp_rx_offloads);
+DRV_LOG(ERR,
+"port %u some Rx offloads are not supported requested"
+" 0x%" PRIx64 " supported 0x%" PRIx64,
+dev->data->port_id, rx_offloads, supp_rx_offloads);
 rte_errno = ENOTSUP;
 return -rte_errno;
 }
@@ -337,8 +339,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 rte_realloc(priv->rss_conf.rss_key,
 rss_hash_default_key_len, 0);
 if (!priv->rss_conf.rss_key) {
-ERROR("port %u cannot allocate RSS hash key memory (%u)",
-dev->data->port_id, rxqs_n);
+DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
+dev->data->port_id, rxqs_n);
 rte_errno = ENOMEM;
 return -rte_errno;
 }
@@ -352,20 +354,20 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 priv->rxqs = (void *)dev->data->rx_queues;
 priv->txqs = (void *)dev->data->tx_queues;
 if (txqs_n != priv->txqs_n) {
-INFO("port %u Tx queues number update: %u -> %u",
-dev->data->port_id, priv->txqs_n, txqs_n);
+DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
+dev->data->port_id, priv->txqs_n, txqs_n);
 priv->txqs_n = txqs_n;
 }
 if (rxqs_n > priv->config.ind_table_max_size) {
-ERROR("port %u cannot handle this many Rx queues (%u)",
-dev->data->port_id, rxqs_n);
+DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
+dev->data->port_id, rxqs_n);
 rte_errno = EINVAL;
 return -rte_errno;
 }
 if (rxqs_n == priv->rxqs_n)
 return 0;
-INFO("port %u Rx queues number update: %u -> %u",
-dev->data->port_id, priv->rxqs_n, rxqs_n);
+DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
+dev->data->port_id, priv->rxqs_n, rxqs_n);
 priv->rxqs_n = rxqs_n;
 /* If the requested number of RX queues is not a power of two, use the
 * maximum indirection table size for better balancing.
@@ -491,8 +493,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
 
 ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
 if (ret) {
-WARN("port %u ioctl(SIOCGIFFLAGS) failed: %s",
-dev->data->port_id, strerror(rte_errno));
+DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
+dev->data->port_id, strerror(rte_errno));
 return ret;
 }
 memset(&dev_link, 0, sizeof(dev_link));
@@ -501,8 +503,9 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
 ifr.ifr_data = (void *)&edata;
 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
 if (ret) {
-WARN("port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
-dev->data->port_id, strerror(rte_errno));
+DRV_LOG(WARNING,
+"port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
+dev->data->port_id, strerror(rte_errno));
 return ret;
 }
 link_speed = ethtool_cmd_speed(&edata);
@@ -558,8 +561,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
 
 ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
 if (ret) {
-WARN("port %u ioctl(SIOCGIFFLAGS) failed: %s",
-dev->data->port_id, strerror(rte_errno));
+DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
+dev->data->port_id, strerror(rte_errno));
 return ret;
 }
 memset(&dev_link, 0, sizeof(dev_link));
@@ -568,8 +571,10 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
 ifr.ifr_data = (void *)&gcmd;
 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
 if (ret) {
-DEBUG("port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
-" failed: %s", dev->data->port_id, strerror(rte_errno));
+DRV_LOG(DEBUG,
+"port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
+" failed: %s",
+dev->data->port_id, strerror(rte_errno));
 return ret;
 }
 gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
@@ -583,8 +588,10 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
 ifr.ifr_data = (void *)ecmd;
 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
 if (ret) {
-DEBUG("port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
-" failed: %s", dev->data->port_id, strerror(rte_errno));
+DRV_LOG(DEBUG,
+"port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
+" failed: %s",
+dev->data->port_id, strerror(rte_errno));
 return ret;
 }
 dev_link.link_speed = ecmd->speed;
@@ -655,14 +662,17 @@ mlx5_link_start(struct rte_eth_dev *dev)
 dev->rx_pkt_burst = mlx5_select_rx_function(dev);
 ret = mlx5_traffic_enable(dev);
 if (ret) {
-ERROR("port %u error occurred while configuring control flows:"
-" %s", dev->data->port_id, strerror(rte_errno));
+DRV_LOG(ERR,
+"port %u error occurred while configuring control"
+" flows: %s",
+dev->data->port_id, strerror(rte_errno));
 return;
 }
 ret = mlx5_flow_start(dev, &priv->flows);
 if (ret)
-ERROR("port %u error occurred while configuring flows: %s",
-dev->data->port_id, strerror(rte_errno));
+DRV_LOG(ERR,
+"port %u error occurred while configuring flows: %s",
+dev->data->port_id, strerror(rte_errno));
 }
 
 /**
@@ -783,7 +793,8 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 return ret;
 if (kern_mtu == mtu) {
 priv->mtu = mtu;
-DEBUG("port %u adapter MTU set to %u", dev->data->port_id, mtu);
+DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
+dev->data->port_id, mtu);
 return 0;
 }
 rte_errno = EAGAIN;
@@ -813,8 +824,10 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 ifr.ifr_data = (void *)&ethpause;
 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
 if (ret) {
-WARN("port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
-" %s", dev->data->port_id, strerror(rte_errno));
+DRV_LOG(WARNING,
+"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
+" %s",
+dev->data->port_id, strerror(rte_errno));
 return ret;
 }
 fc_conf->autoneg = ethpause.autoneg;
@@ -864,8 +877,10 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 ethpause.tx_pause = 0;
 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
 if (ret) {
-WARN("port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
-" failed: %s", dev->data->port_id, strerror(rte_errno));
+DRV_LOG(WARNING,
+"port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
+" failed: %s",
+dev->data->port_id, strerror(rte_errno));
 return ret;
 }
 return 0;
@@ -994,8 +1009,9 @@ mlx5_dev_status_handler(struct rte_eth_dev *dev)
 dev->data->dev_conf.intr_conf.rmv == 1)
 ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
 else
-DEBUG("port %u event type %d on not handled",
-dev->data->port_id, event.event_type);
+DRV_LOG(DEBUG,
+"port %u event type %d on not handled",
+dev->data->port_id, event.event_type);
 mlx5_glue->ack_async_event(&event);
 }
 if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
@@ -1103,8 +1119,10 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
 flags = fcntl(priv->ctx->async_fd, F_GETFL);
 ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
 if (ret) {
-INFO("port %u failed to change file descriptor async event"
-" queue", dev->data->port_id);
+DRV_LOG(INFO,
+"port %u failed to change file descriptor async event"
+" queue",
+dev->data->port_id);
 dev->data->dev_conf.intr_conf.lsc = 0;
 dev->data->dev_conf.intr_conf.rmv = 0;
 }
@@ -1117,8 +1135,8 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
 }
 ret = mlx5_socket_init(dev);
 if (ret)
-ERROR("port %u cannot initialise socket: %s",
-dev->data->port_id, strerror(rte_errno));
+DRV_LOG(ERR, "port %u cannot initialise socket: %s",
+dev->data->port_id, strerror(rte_errno));
 else if (priv->primary_socket) {
 priv->intr_handle_socket.fd = priv->primary_socket;
 priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
@@ -1188,20 +1206,24 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
 tx_pkt_burst = mlx5_tx_burst_raw_vec;
 else
 tx_pkt_burst = mlx5_tx_burst_vec;
-DEBUG("port %u selected enhanced MPW Tx vectorized"
-" function", dev->data->port_id);
+DRV_LOG(DEBUG,
+"port %u selected enhanced MPW Tx vectorized"
+" function",
+dev->data->port_id);
 } else {
 tx_pkt_burst = mlx5_tx_burst_empw;
-DEBUG("port %u selected enhanced MPW Tx function",
-dev->data->port_id);
+DRV_LOG(DEBUG,
+"port %u selected enhanced MPW Tx function",
+dev->data->port_id);
 }
 } else if (config->mps && (config->txq_inline > 0)) {
 tx_pkt_burst = mlx5_tx_burst_mpw_inline;
-DEBUG("port %u selected MPW inline Tx function",
-dev->data->port_id);
+DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
+dev->data->port_id);
 } else if (config->mps) {
 tx_pkt_burst = mlx5_tx_burst_mpw;
-DEBUG("port %u selected MPW Tx function", dev->data->port_id);
+DRV_LOG(DEBUG, "port %u selected MPW Tx function",
+dev->data->port_id);
 }
 return tx_pkt_burst;
 }
@@ -1223,8 +1245,8 @@ mlx5_select_rx_function(struct rte_eth_dev *dev)
 assert(dev != NULL);
 if (mlx5_check_vec_rx_support(dev) > 0) {
 rx_pkt_burst = mlx5_rx_burst_vec;
-DEBUG("port %u selected Rx vectorized function",
-dev->data->port_id);
+DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
+dev->data->port_id);
 }
 return rx_pkt_burst;
 }
 
@@ -1818,11 +1818,11 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
 goto error;
 }
 ++flows_n;
-DEBUG("port %u %p type %d QP %p ibv_flow %p",
-dev->data->port_id,
-(void *)flow, i,
-(void *)flow->frxq[i].hrxq,
-(void *)flow->frxq[i].ibv_flow);
+DRV_LOG(DEBUG, "port %u %p type %d QP %p ibv_flow %p",
+dev->data->port_id,
+(void *)flow, i,
+(void *)flow->frxq[i].hrxq,
+(void *)flow->frxq[i].ibv_flow);
 }
 if (!flows_n) {
 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1922,11 +1922,12 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
 if (ret)
 goto exit;
 TAILQ_INSERT_TAIL(list, flow, next);
-DEBUG("port %u flow created %p", dev->data->port_id, (void *)flow);
+DRV_LOG(DEBUG, "port %u flow created %p", dev->data->port_id,
+(void *)flow);
 return flow;
 exit:
-ERROR("port %u flow creation error: %s", dev->data->port_id,
-error->message);
+DRV_LOG(ERR, "port %u flow creation error: %s", dev->data->port_id,
+error->message);
 for (i = 0; i != hash_rxq_init_n; ++i) {
 if (parser.queue[i].ibv_attr)
 rte_free(parser.queue[i].ibv_attr);
@@ -2044,7 +2045,8 @@ free:
 flow->cs = NULL;
 }
 TAILQ_REMOVE(list, flow, next);
-DEBUG("port %u flow destroyed %p", dev->data->port_id, (void *)flow);
+DRV_LOG(DEBUG, "port %u flow destroyed %p", dev->data->port_id,
+(void *)flow);
 rte_free(flow);
 }
 
@@ -2086,15 +2088,16 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
 assert(priv->ctx);
 fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
 if (!fdq) {
-WARN("port %u cannot allocate memory for drop queue",
-dev->data->port_id);
+DRV_LOG(WARNING,
+"port %u cannot allocate memory for drop queue",
+dev->data->port_id);
 rte_errno = ENOMEM;
 return -rte_errno;
 }
 fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
 if (!fdq->cq) {
-WARN("port %u cannot allocate CQ for drop queue",
-dev->data->port_id);
+DRV_LOG(WARNING, "port %u cannot allocate CQ for drop queue",
+dev->data->port_id);
 rte_errno = errno;
 goto error;
 }
@@ -2108,8 +2111,8 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
 .cq = fdq->cq,
 });
 if (!fdq->wq) {
-WARN("port %u cannot allocate WQ for drop queue",
-dev->data->port_id);
+DRV_LOG(WARNING, "port %u cannot allocate WQ for drop queue",
+dev->data->port_id);
 rte_errno = errno;
 goto error;
 }
@@ -2121,8 +2124,10 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
 .comp_mask = 0,
 });
 if (!fdq->ind_table) {
-WARN("port %u cannot allocate indirection table for drop"
-" queue", dev->data->port_id);
+DRV_LOG(WARNING,
+"port %u cannot allocate indirection table for drop"
+" queue",
+dev->data->port_id);
 rte_errno = errno;
 goto error;
 }
@@ -2145,8 +2150,8 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
 .pd = priv->pd
 });
 if (!fdq->qp) {
-WARN("port %u cannot allocate QP for drop queue",
-dev->data->port_id);
+DRV_LOG(WARNING, "port %u cannot allocate QP for drop queue",
+dev->data->port_id);
 rte_errno = errno;
 goto error;
 }
@@ -2217,8 +2222,8 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
 claim_zero(mlx5_glue->destroy_flow
 (flow->frxq[HASH_RXQ_ETH].ibv_flow));
 flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
-DEBUG("port %u flow %p removed", dev->data->port_id,
-(void *)flow);
+DRV_LOG(DEBUG, "port %u flow %p removed",
+dev->data->port_id, (void *)flow);
 /* Next flow. */
 continue;
 }
@@ -2251,8 +2256,8 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
 mlx5_hrxq_release(dev, flow->frxq[i].hrxq);
 flow->frxq[i].hrxq = NULL;
 }
-DEBUG("port %u flow %p removed", dev->data->port_id,
-(void *)flow);
+DRV_LOG(DEBUG, "port %u flow %p removed", dev->data->port_id,
+(void *)flow);
 }
 }
 
@@ -2282,14 +2287,14 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
 (priv->flow_drop_queue->qp,
 flow->frxq[HASH_RXQ_ETH].ibv_attr);
 if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
-DEBUG("port %u flow %p cannot be applied",
-dev->data->port_id,
-(void *)flow);
+DRV_LOG(DEBUG,
+"port %u flow %p cannot be applied",
+dev->data->port_id, (void *)flow);
 rte_errno = EINVAL;
 return -rte_errno;
 }
-DEBUG("port %u flow %p applied", dev->data->port_id,
-(void *)flow);
+DRV_LOG(DEBUG, "port %u flow %p applied",
+dev->data->port_id, (void *)flow);
 /* Next flow. */
 continue;
 }
@@ -2311,8 +2316,9 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
 (*flow->queues),
 flow->queues_n);
 if (!flow->frxq[i].hrxq) {
-DEBUG("port %u flow %p cannot be applied",
-dev->data->port_id, (void *)flow);
+DRV_LOG(DEBUG,
+"port %u flow %p cannot be applied",
+dev->data->port_id, (void *)flow);
 rte_errno = EINVAL;
 return -rte_errno;
 }
@@ -2321,13 +2327,14 @@ flow_create:
 mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
 flow->frxq[i].ibv_attr);
 if (!flow->frxq[i].ibv_flow) {
-DEBUG("port %u flow %p cannot be applied",
-dev->data->port_id, (void *)flow);
+DRV_LOG(DEBUG,
+"port %u flow %p cannot be applied",
+dev->data->port_id, (void *)flow);
 rte_errno = EINVAL;
 return -rte_errno;
 }
-DEBUG("port %u flow %p applied",
-dev->data->port_id, (void *)flow);
+DRV_LOG(DEBUG, "port %u flow %p applied",
+dev->data->port_id, (void *)flow);
 }
 if (!flow->mark)
 continue;
@@ -2353,8 +2360,8 @@ mlx5_flow_verify(struct rte_eth_dev *dev)
 int ret = 0;
 
 TAILQ_FOREACH(flow, &priv->flows, next) {
-DEBUG("port %u flow %p still referenced",
-dev->data->port_id, (void *)flow);
+DRV_LOG(DEBUG, "port %u flow %p still referenced",
+dev->data->port_id, (void *)flow);
 ++ret;
 }
 return ret;
@@ -2625,8 +2632,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 
 /* Validate queue number. */
 if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
-ERROR("port %u invalid queue number %d",
-dev->data->port_id, fdir_filter->action.rx_queue);
+DRV_LOG(ERR, "port %u invalid queue number %d",
+dev->data->port_id, fdir_filter->action.rx_queue);
 rte_errno = EINVAL;
 return -rte_errno;
 }
@@ -2649,9 +2656,9 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 };
 break;
 default:
-ERROR("port %u invalid behavior %d",
-dev->data->port_id,
-fdir_filter->action.behavior);
+DRV_LOG(ERR, "port %u invalid behavior %d",
+dev->data->port_id,
+fdir_filter->action.behavior);
 rte_errno = ENOTSUP;
 return -rte_errno;
 }
@@ -2787,8 +2794,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
 };
 break;
 default:
-ERROR("port %u invalid flow type%d",
-dev->data->port_id, fdir_filter->input.flow_type);
+DRV_LOG(ERR, "port %u invalid flow type%d",
+dev->data->port_id, fdir_filter->input.flow_type);
 rte_errno = ENOTSUP;
 return -rte_errno;
 }
@@ -2837,8 +2844,8 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
 attributes.items, attributes.actions,
 &error);
 if (flow) {
-DEBUG("port %u FDIR created %p", dev->data->port_id,
-(void *)flow);
+DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
+(void *)flow);
 return 0;
 }
 return -rte_errno;
@@ -3032,8 +3039,8 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
 return 0;
 if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
-ERROR("port %u flow director mode %d not supported",
-dev->data->port_id, fdir_mode);
+DRV_LOG(ERR, "port %u flow director mode %d not supported",
+dev->data->port_id, fdir_mode);
 rte_errno = EINVAL;
 return -rte_errno;
 }
@@ -3051,8 +3058,8 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
 mlx5_fdir_info_get(dev, arg);
 break;
 default:
-DEBUG("port %u unknown operation %u", dev->data->port_id,
-filter_op);
+DRV_LOG(DEBUG, "port %u unknown operation %u",
+dev->data->port_id, filter_op);
 rte_errno = EINVAL;
 return -rte_errno;
 }
@@ -3091,8 +3098,8 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 case RTE_ETH_FILTER_FDIR:
 return mlx5_fdir_ctrl_func(dev, filter_op, arg);
 default:
-ERROR("port %u filter type (%d) not supported",
-dev->data->port_id, filter_type);
+DRV_LOG(ERR, "port %u filter type (%d) not supported",
+dev->data->port_id, filter_type);
 rte_errno = ENOTSUP;
 return -rte_errno;
 }
 
@@ -73,8 +73,8 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
 int ret = mlx5_traffic_restart(dev);
 
 if (ret)
-ERROR("port %u cannot remove mac address: %s",
-dev->data->port_id, strerror(rte_errno));
+DRV_LOG(ERR, "port %u cannot remove mac address: %s",
+dev->data->port_id, strerror(rte_errno));
 }
 }
 
@@ -130,9 +130,11 @@ mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
 {
 int ret;
 
-DEBUG("port %u setting primary MAC address", dev->data->port_id);
+DRV_LOG(DEBUG, "port %u setting primary MAC address",
+dev->data->port_id);
 
 ret = mlx5_mac_addr_add(dev, mac_addr, 0, 0);
 if (ret)
-ERROR("port %u cannot set mac address: %s",
-dev->data->port_id, strerror(rte_errno));
+DRV_LOG(ERR, "port %u cannot set mac address: %s",
+dev->data->port_id, strerror(rte_errno));
 }
 
@@ -104,17 +104,18 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 
 rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
 /* Add a new entry, register MR first. */
-DEBUG("port %u discovered new memory pool \"%s\" (%p)",
-txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp);
+DRV_LOG(DEBUG, "port %u discovered new memory pool \"%s\" (%p)",
+txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp);
 dev = txq_ctrl->priv->dev;
 mr = mlx5_mr_get(dev, mp);
 if (mr == NULL) {
 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
-DEBUG("port %u using unregistered mempool 0x%p(%s) in "
-"secondary process, please create mempool before "
-" rte_eth_dev_start()",
-txq_ctrl->priv->dev->data->port_id,
-(void *)mp, mp->name);
+DRV_LOG(DEBUG,
+"port %u using unregistered mempool 0x%p(%s)"
+" in secondary process, please create mempool"
+" before rte_eth_dev_start()",
+txq_ctrl->priv->dev->data->port_id,
+(void *)mp, mp->name);
 rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
 rte_errno = ENOTSUP;
 return NULL;
@@ -122,17 +123,19 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 mr = mlx5_mr_new(dev, mp);
 }
 if (unlikely(mr == NULL)) {
+DRV_LOG(DEBUG,
+"port %u unable to configure memory region,"
+" ibv_reg_mr() failed.",
+txq_ctrl->priv->dev->data->port_id);
 rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
-DEBUG("port %u unable to configure memory region, ibv_reg_mr()"
-" failed",
-txq_ctrl->priv->dev->data->port_id);
 return NULL;
 }
 if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
 /* Table is full, remove oldest entry. */
-DEBUG("port %u memroy region <-> memory pool table full, "
-" dropping oldest entry",
-txq_ctrl->priv->dev->data->port_id);
+DRV_LOG(DEBUG,
+"port %u memory region <-> memory pool table full, "
+" dropping oldest entry",
+txq_ctrl->priv->dev->data->port_id);
 --idx;
 mlx5_mr_release(txq->mp2mr[0]);
 memmove(&txq->mp2mr[0], &txq->mp2mr[1],
@@ -140,9 +143,11 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 }
 /* Store the new entry. */
 txq_ctrl->txq.mp2mr[idx] = mr;
-DEBUG("port %u new memory region lkey for MP \"%s\" (%p): 0x%08" PRIu32,
-txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp,
-txq_ctrl->txq.mp2mr[idx]->lkey);
+DRV_LOG(DEBUG,
+"port %u new memory region lkey for MP \"%s\" (%p): 0x%08"
+PRIu32,
+txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp,
+txq_ctrl->txq.mp2mr[idx]->lkey);
 rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
 return mr;
 }
@@ -209,8 +214,8 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
 }
 mr = mlx5_mr_new(priv->dev, mp);
 if (!mr)
-ERROR("port %u cannot create memory region: %s",
-priv->dev->data->port_id, strerror(rte_errno));
+DRV_LOG(ERR, "port %u cannot create memory region: %s",
+priv->dev->data->port_id, strerror(rte_errno));
 }
 
 /**
@@ -237,21 +242,22 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
 
 mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
 if (!mr) {
-DEBUG("port %u unable to configure memory region, ibv_reg_mr()"
-" failed",
-dev->data->port_id);
+DRV_LOG(DEBUG,
+"port %u unable to configure memory region,"
+" ibv_reg_mr() failed.",
+dev->data->port_id);
 rte_errno = ENOMEM;
 return NULL;
 }
 if (mlx5_check_mempool(mp, &start, &end) != 0) {
-ERROR("port %u mempool %p: not virtually contiguous",
-dev->data->port_id, (void *)mp);
+DRV_LOG(ERR, "port %u mempool %p: not virtually contiguous",
+dev->data->port_id, (void *)mp);
 rte_errno = ENOMEM;
 return NULL;
 }
-DEBUG("port %u mempool %p area start=%p end=%p size=%zu",
-dev->data->port_id, (void *)mp, (void *)start, (void *)end,
-(size_t)(end - start));
+DRV_LOG(DEBUG, "port %u mempool %p area start=%p end=%p size=%zu",
+dev->data->port_id, (void *)mp, (void *)start, (void *)end,
+(size_t)(end - start));
 /* Save original addresses for exact MR lookup. */
 mr->start = start;
 mr->end = end;
@@ -266,10 +272,11 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
 if ((end > addr) && (end < addr + len))
 end = RTE_ALIGN_CEIL(end, align);
 }
-DEBUG("port %u mempool %p using start=%p end=%p size=%zu for memory"
-" region",
-dev->data->port_id, (void *)mp, (void *)start, (void *)end,
-(size_t)(end - start));
+DRV_LOG(DEBUG,
+"port %u mempool %p using start=%p end=%p size=%zu for memory"
+" region",
+dev->data->port_id, (void *)mp, (void *)start, (void *)end,
+(size_t)(end - start));
 mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start,
 IBV_ACCESS_LOCAL_WRITE);
 if (!mr->mr) {
@@ -279,8 +286,8 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
 mr->mp = mp;
 mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
 rte_atomic32_inc(&mr->refcnt);
-DEBUG("port %u new memory region %p refcnt: %d",
-dev->data->port_id, (void *)mr, rte_atomic32_read(&mr->refcnt));
+DRV_LOG(DEBUG, "port %u new memory region %p refcnt: %d",
+dev->data->port_id, (void *)mr, rte_atomic32_read(&mr->refcnt));
 LIST_INSERT_HEAD(&priv->mr, mr, next);
 return mr;
 }
@@ -308,9 +315,9 @@ mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp)
 LIST_FOREACH(mr, &priv->mr, next) {
 if (mr->mp == mp) {
 rte_atomic32_inc(&mr->refcnt);
-DEBUG("port %u memory region %p refcnt: %d",
-dev->data->port_id, (void *)mr,
-rte_atomic32_read(&mr->refcnt));
+DRV_LOG(DEBUG, "port %u memory region %p refcnt: %d",
+dev->data->port_id, (void *)mr,
+rte_atomic32_read(&mr->refcnt));
 return mr;
 }
 }
@@ -330,8 +337,8 @@ int
 mlx5_mr_release(struct mlx5_mr *mr)
 {
 assert(mr);
-DEBUG("memory region %p refcnt: %d", (void *)mr,
-rte_atomic32_read(&mr->refcnt));
+DRV_LOG(DEBUG, "memory region %p refcnt: %d", (void *)mr,
+rte_atomic32_read(&mr->refcnt));
 if (rte_atomic32_dec_and_test(&mr->refcnt)) {
 claim_zero(mlx5_glue->dereg_mr(mr->mr));
 LIST_REMOVE(mr, next);
@@ -358,8 +365,8 @@ mlx5_mr_verify(struct rte_eth_dev *dev)
 struct mlx5_mr *mr;
 
 LIST_FOREACH(mr, &priv->mr, next) {
-DEBUG("port %u memory region %p still referenced",
-dev->data->port_id, (void *)mr);
+DRV_LOG(DEBUG, "port %u memory region %p still referenced",
+dev->data->port_id, (void *)mr);
 ++ret;
 }
 return ret;
 
@@ -37,8 +37,8 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 dev->data->promiscuous = 1;
 ret = mlx5_traffic_restart(dev);
 if (ret)
-ERROR("port %u cannot enable promiscuous mode: %s",
-dev->data->port_id, strerror(rte_errno));
+DRV_LOG(ERR, "port %u cannot enable promiscuous mode: %s",
+dev->data->port_id, strerror(rte_errno));
 }
 
 /**
@@ -55,8 +55,8 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
 dev->data->promiscuous = 0;
 ret = mlx5_traffic_restart(dev);
 if (ret)
-ERROR("port %u cannot disable promiscuous mode: %s",
-dev->data->port_id, strerror(rte_errno));
+DRV_LOG(ERR, "port %u cannot disable promiscuous mode: %s",
+dev->data->port_id, strerror(rte_errno));
 }
 
 /**
@@ -73,8 +73,8 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
 dev->data->all_multicast = 1;
 ret = mlx5_traffic_restart(dev);
 if (ret)
-ERROR("port %u cannot enable allmulicast mode: %s",
-dev->data->port_id, strerror(rte_errno));
+DRV_LOG(ERR, "port %u cannot enable allmulicast mode: %s",
+dev->data->port_id, strerror(rte_errno));
 }
 
 /**
@@ -91,6 +91,6 @@ mlx5_allmulticast_disable(struct rte_eth_dev *dev)
 dev->data->all_multicast = 0;
 ret = mlx5_traffic_restart(dev);
 if (ret)
-ERROR("port %u cannot disable allmulicast mode: %s",
-dev->data->port_id, strerror(rte_errno));
+DRV_LOG(ERR, "port %u cannot disable allmulicast mode: %s",
+dev->data->port_id, strerror(rte_errno));
 }
 
@@ -77,8 +77,8 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 
 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
 if (buf == NULL) {
-ERROR("port %u empty mbuf pool",
-rxq_ctrl->priv->dev->data->port_id);
+DRV_LOG(ERR, "port %u empty mbuf pool",
+rxq_ctrl->priv->dev->data->port_id);
 rte_errno = ENOMEM;
 goto error;
 }
@@ -119,9 +119,11 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
 }
-DEBUG("port %u Rx queue %u allocated and configured %u segments"
-" (max %u packets)", rxq_ctrl->priv->dev->data->port_id,
-rxq_ctrl->idx, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
+DRV_LOG(DEBUG,
+"port %u Rx queue %u allocated and configured %u segments"
+" (max %u packets)",
+rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx, elts_n,
+elts_n / (1 << rxq_ctrl->rxq.sges_n));
 return 0;
 error:
 err = rte_errno; /* Save rte_errno before cleanup. */
@@ -131,8 +133,8 @@ error:
 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
 (*rxq_ctrl->rxq.elts)[i] = NULL;
 }
-DEBUG("port %u Rx queue %u failed, freed everything",
-rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
+rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
 rte_errno = err; /* Restore rte_errno. */
 return -rte_errno;
 }
@@ -152,8 +154,8 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
 uint16_t i;
 
-DEBUG("port %u Rx queue %u freeing WRs",
-rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
+rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
 if (rxq->elts == NULL)
 return;
 /**
@@ -183,8 +185,8 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 void
 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
-DEBUG("port %u cleaning up Rx queue %u",
-rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
+rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
 if (rxq_ctrl->ibv)
 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
 memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
@@ -290,44 +292,47 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 if (!rte_is_power_of_2(desc)) {
 desc = 1 << log2above(desc);
-WARN("port %u increased number of descriptors in Rx queue %u"
-" to the next power of two (%d)",
-dev->data->port_id, idx, desc);
+DRV_LOG(WARNING,
+"port %u increased number of descriptors in Rx queue %u"
+" to the next power of two (%d)",
+dev->data->port_id, idx, desc);
 }
-DEBUG("port %u configuring Rx queue %u for %u descriptors",
-dev->data->port_id, idx, desc);
+DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
+dev->data->port_id, idx, desc);
 if (idx >= priv->rxqs_n) {
-ERROR("port %u Rx queue index out of range (%u >= %u)",
-dev->data->port_id, idx, priv->rxqs_n);
+DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
+dev->data->port_id, idx, priv->rxqs_n);
 rte_errno = EOVERFLOW;
 return -rte_errno;
 }
 if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
-ERROR("port %u Rx queue offloads 0x%" PRIx64 " don't match"
-" port offloads 0x%" PRIx64 " or supported offloads 0x%"
-PRIx64,
-dev->data->port_id, conf->offloads,
-dev->data->dev_conf.rxmode.offloads,
-(mlx5_get_rx_port_offloads() |
-mlx5_get_rx_queue_offloads(dev)));
+DRV_LOG(ERR,
+"port %u Rx queue offloads 0x%" PRIx64 " don't match"
+" port offloads 0x%" PRIx64 " or supported offloads 0x%"
+PRIx64,
+dev->data->port_id, conf->offloads,
+dev->data->dev_conf.rxmode.offloads,
+(mlx5_get_rx_port_offloads() |
+mlx5_get_rx_queue_offloads(dev)));
 rte_errno = ENOTSUP;
 return -rte_errno;
 }
 if (!mlx5_rxq_releasable(dev, idx)) {
-ERROR("port %u unable to release queue index %u",
-dev->data->port_id, idx);
+DRV_LOG(ERR, "port %u unable to release queue index %u",
+dev->data->port_id, idx);
 rte_errno = EBUSY;
 return -rte_errno;
 }
 mlx5_rxq_release(dev, idx);
 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
 if (!rxq_ctrl) {
-ERROR("port %u unable to allocate queue index %u",
-dev->data->port_id, idx);
+DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+dev->data->port_id, idx);
 rte_errno = ENOMEM;
 return -rte_errno;
 }
-DEBUG("port %u adding Rx queue %u to list", dev->data->port_id, idx);
+DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
+dev->data->port_id, idx);
 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
 return 0;
 }
@ -380,9 +385,10 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
|
||||
mlx5_rx_intr_vec_disable(dev);
|
||||
intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
|
||||
if (intr_handle->intr_vec == NULL) {
|
||||
ERROR("port %u failed to allocate memory for interrupt vector,"
|
||||
" Rx interrupts will not be supported",
|
||||
dev->data->port_id);
|
||||
DRV_LOG(ERR,
|
||||
"port %u failed to allocate memory for interrupt"
|
||||
" vector, Rx interrupts will not be supported",
|
||||
dev->data->port_id);
|
||||
rte_errno = ENOMEM;
|
||||
return -rte_errno;
|
||||
}
|
||||
@ -403,9 +409,11 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
|
||||
continue;
|
||||
}
|
||||
if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
|
||||
ERROR("port %u too many Rx queues for interrupt vector"
|
||||
" size (%d), Rx interrupts cannot be enabled",
|
||||
dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
|
||||
DRV_LOG(ERR,
|
||||
"port %u too many Rx queues for interrupt"
|
||||
" vector size (%d), Rx interrupts cannot be"
|
||||
" enabled",
|
||||
dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
|
||||
mlx5_rx_intr_vec_disable(dev);
|
||||
rte_errno = ENOMEM;
|
||||
return -rte_errno;
|
||||
@ -415,9 +423,11 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
|
||||
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
|
||||
if (rc < 0) {
|
||||
rte_errno = errno;
|
||||
ERROR("port %u failed to make Rx interrupt file"
|
||||
" descriptor %d non-blocking for queue index %d",
|
||||
dev->data->port_id, fd, i);
|
||||
DRV_LOG(ERR,
|
||||
"port %u failed to make Rx interrupt file"
|
||||
" descriptor %d non-blocking for queue index"
|
||||
" %d",
|
||||
dev->data->port_id, fd, i);
|
||||
mlx5_rx_intr_vec_disable(dev);
|
||||
return -rte_errno;
|
||||
}
|
||||
@ -583,8 +593,8 @@ exit:
|
||||
ret = rte_errno; /* Save rte_errno before cleanup. */
|
||||
if (rxq_ibv)
|
||||
mlx5_rxq_ibv_release(rxq_ibv);
|
||||
WARN("port %u unable to disable interrupt on Rx queue %d",
|
||||
dev->data->port_id, rx_queue_id);
|
||||
DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
|
||||
dev->data->port_id, rx_queue_id);
|
||||
rte_errno = ret; /* Restore rte_errno. */
|
||||
return -rte_errno;
|
||||
}
|
||||
@ -632,8 +642,9 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
|
||||
tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
|
||||
rxq_ctrl->socket);
|
||||
if (!tmpl) {
|
||||
ERROR("port %u Rx queue %u cannot allocate verbs resources",
|
||||
dev->data->port_id, rxq_ctrl->idx);
|
||||
DRV_LOG(ERR,
|
||||
"port %u Rx queue %u cannot allocate verbs resources",
|
||||
dev->data->port_id, rxq_ctrl->idx);
|
||||
rte_errno = ENOMEM;
|
||||
goto error;
|
||||
}
|
||||
@ -643,16 +654,16 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
|
||||
if (!tmpl->mr) {
|
||||
tmpl->mr = mlx5_mr_new(dev, rxq_data->mp);
|
||||
if (!tmpl->mr) {
|
||||
ERROR("port %u: memory region creation failure",
|
||||
dev->data->port_id);
|
||||
DRV_LOG(ERR, "port %u: memeroy region creation failure",
dev->data->port_id);
goto error;
}
}
if (rxq_ctrl->irq) {
tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
if (!tmpl->channel) {
ERROR("port %u: comp channel creation failure",
dev->data->port_id);
DRV_LOG(ERR, "port %u: comp channel creation failure",
dev->data->port_id);
rte_errno = ENOMEM;
goto error;
}
@ -676,22 +687,24 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (mlx5_rxq_check_vec_support(rxq_data) < 0)
attr.cq.ibv.cqe *= 2;
} else if (config->cqe_comp && rxq_data->hw_timestamp) {
DEBUG("port %u Rx CQE compression is disabled for HW timestamp",
dev->data->port_id);
DRV_LOG(DEBUG,
"port %u Rx CQE compression is disabled for HW"
" timestamp",
dev->data->port_id);
}
tmpl->cq = mlx5_glue->cq_ex_to_cq
(mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
&attr.cq.mlx5));
if (tmpl->cq == NULL) {
ERROR("port %u Rx queue %u CQ creation failure",
dev->data->port_id, idx);
DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
dev->data->port_id, idx);
rte_errno = ENOMEM;
goto error;
}
DEBUG("port %u priv->device_attr.max_qp_wr is %d",
dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
DEBUG("port %u priv->device_attr.max_sge is %d",
dev->data->port_id, priv->device_attr.orig_attr.max_sge);
DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
dev->data->port_id, priv->device_attr.orig_attr.max_sge);
attr.wq = (struct ibv_wq_init_attr){
.wq_context = NULL, /* Could be useful in the future. */
.wq_type = IBV_WQT_RQ,
@ -721,8 +734,8 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
#endif
tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
if (tmpl->wq == NULL) {
ERROR("port %u Rx queue %u WQ creation failure",
dev->data->port_id, idx);
DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
dev->data->port_id, idx);
rte_errno = ENOMEM;
goto error;
}
@ -733,12 +746,13 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (((int)attr.wq.max_wr !=
((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
ERROR("port %u Rx queue %u requested %u*%u but got %u*%u"
" WRs*SGEs",
dev->data->port_id, idx,
((1 << rxq_data->elts_n) >> rxq_data->sges_n),
(1 << rxq_data->sges_n),
attr.wq.max_wr, attr.wq.max_sge);
DRV_LOG(ERR,
"port %u Rx queue %u requested %u*%u but got %u*%u"
" WRs*SGEs",
dev->data->port_id, idx,
((1 << rxq_data->elts_n) >> rxq_data->sges_n),
(1 << rxq_data->sges_n),
attr.wq.max_wr, attr.wq.max_sge);
rte_errno = EINVAL;
goto error;
}
@ -749,8 +763,9 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
};
ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
if (ret) {
ERROR("port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
dev->data->port_id, idx);
DRV_LOG(ERR,
"port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
dev->data->port_id, idx);
rte_errno = ret;
goto error;
}
@ -764,9 +779,10 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
goto error;
}
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
ERROR("port %u wrong MLX5_CQE_SIZE environment variable value: "
"it should be set to %u", dev->data->port_id,
RTE_CACHE_LINE_SIZE);
DRV_LOG(ERR,
"port %u wrong MLX5_CQE_SIZE environment variable"
" value: it should be set to %u",
dev->data->port_id, RTE_CACHE_LINE_SIZE);
rte_errno = EINVAL;
goto error;
}
@ -803,11 +819,11 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
rte_wmb();
*rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
DEBUG("port %u rxq %u updated with %p", dev->data->port_id, idx,
(void *)&tmpl);
DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
idx, (void *)&tmpl);
rte_atomic32_inc(&tmpl->refcnt);
DEBUG("port %u Verbs Rx queue %u: refcnt %d", dev->data->port_id, idx,
rte_atomic32_read(&tmpl->refcnt));
DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
@ -852,9 +868,9 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
if (rxq_ctrl->ibv) {
mlx5_mr_get(dev, rxq_data->mp);
rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
DEBUG("port %u Verbs Rx queue %u: refcnt %d",
dev->data->port_id, rxq_ctrl->idx,
rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
dev->data->port_id, rxq_ctrl->idx,
rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
}
return rxq_ctrl->ibv;
}
@ -880,9 +896,9 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
ret = mlx5_mr_release(rxq_ibv->mr);
if (!ret)
rxq_ibv->mr = NULL;
DEBUG("port %u Verbs Rx queue %u: refcnt %d",
rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
rxq_free_elts(rxq_ibv->rxq_ctrl);
claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
@ -914,8 +930,8 @@ mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
struct mlx5_rxq_ibv *rxq_ibv;

LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
DEBUG("port %u Verbs Rx queue %u still referenced",
dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
++ret;
}
return ret;
@ -997,30 +1013,33 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
size = mb_len * (1 << tmpl->rxq.sges_n);
size -= RTE_PKTMBUF_HEADROOM;
if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
ERROR("port %u too many SGEs (%u) needed to handle"
" requested maximum packet size %u",
dev->data->port_id,
1 << sges_n,
dev->data->dev_conf.rxmode.max_rx_pkt_len);
DRV_LOG(ERR,
"port %u too many SGEs (%u) needed to handle"
" requested maximum packet size %u",
dev->data->port_id,
1 << sges_n,
dev->data->dev_conf.rxmode.max_rx_pkt_len);
rte_errno = EOVERFLOW;
goto error;
}
} else {
WARN("port %u the requested maximum Rx packet size (%u) is"
" larger than a single mbuf (%u) and scattered"
" mode has not been requested",
dev->data->port_id,
dev->data->dev_conf.rxmode.max_rx_pkt_len,
mb_len - RTE_PKTMBUF_HEADROOM);
DRV_LOG(WARNING,
"port %u the requested maximum Rx packet size (%u) is"
" larger than a single mbuf (%u) and scattered mode has"
" not been requested",
dev->data->port_id,
dev->data->dev_conf.rxmode.max_rx_pkt_len,
mb_len - RTE_PKTMBUF_HEADROOM);
}
DEBUG("port %u maximum number of segments per packet: %u",
dev->data->port_id, 1 << tmpl->rxq.sges_n);
DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
dev->data->port_id, 1 << tmpl->rxq.sges_n);
if (desc % (1 << tmpl->rxq.sges_n)) {
ERROR("port %u number of Rx queue descriptors (%u) is not a"
" multiple of SGEs per packet (%u)",
dev->data->port_id,
desc,
1 << tmpl->rxq.sges_n);
DRV_LOG(ERR,
"port %u number of Rx queue descriptors (%u) is not a"
" multiple of SGEs per packet (%u)",
dev->data->port_id,
desc,
1 << tmpl->rxq.sges_n);
rte_errno = EINVAL;
goto error;
}
@ -1037,17 +1056,19 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
} else if (config->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
} else {
WARN("port %u CRC stripping has been disabled but will still"
" be performed by hardware, make sure MLNX_OFED and"
" firmware are up to date",
dev->data->port_id);
DRV_LOG(WARNING,
"port %u CRC stripping has been disabled but will"
" still be performed by hardware, make sure MLNX_OFED"
" and firmware are up to date",
dev->data->port_id);
tmpl->rxq.crc_present = 0;
}
DEBUG("port %u CRC stripping is %s, %u bytes will be subtracted from"
" incoming frames to hide it",
dev->data->port_id,
tmpl->rxq.crc_present ? "disabled" : "enabled",
tmpl->rxq.crc_present << 2);
DRV_LOG(DEBUG,
"port %u CRC stripping is %s, %u bytes will be subtracted from"
" incoming frames to hide it",
dev->data->port_id,
tmpl->rxq.crc_present ? "disabled" : "enabled",
tmpl->rxq.crc_present << 2);
/* Save port ID. */
tmpl->rxq.rss_hash = priv->rxqs_n > 1;
tmpl->rxq.port_id = dev->data->port_id;
@ -1059,8 +1080,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
tmpl->idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
DEBUG("port %u Rx queue %u: refcnt %d", dev->data->port_id,
idx, rte_atomic32_read(&tmpl->refcnt));
DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
@ -1091,8 +1112,9 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
rxq);
mlx5_rxq_ibv_get(dev, idx);
rte_atomic32_inc(&rxq_ctrl->refcnt);
DEBUG("port %u Rx queue %u: refcnt %d", dev->data->port_id,
rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d",
dev->data->port_id, rxq_ctrl->idx,
rte_atomic32_read(&rxq_ctrl->refcnt));
}
return rxq_ctrl;
}
@ -1120,8 +1142,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
assert(rxq_ctrl->priv);
if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
rxq_ctrl->ibv = NULL;
DEBUG("port %u Rx queue %u: refcnt %d", dev->data->port_id,
rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
LIST_REMOVE(rxq_ctrl, next);
rte_free(rxq_ctrl);
@ -1174,8 +1196,8 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
int ret = 0;

LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
DEBUG("port %u Rx queue %u still referenced",
dev->data->port_id, rxq_ctrl->idx);
DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
dev->data->port_id, rxq_ctrl->idx);
++ret;
}
return ret;
@ -1238,12 +1260,14 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
}
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
DEBUG("port %u indirection table %p: refcnt %d", dev->data->port_id,
(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
dev->data->port_id, (void *)ind_tbl,
rte_atomic32_read(&ind_tbl->refcnt));
return ind_tbl;
error:
rte_free(ind_tbl);
DEBUG("port %u cannot create indirection table", dev->data->port_id);
DRV_LOG(DEBUG, "port %u cannot create indirection table",
dev->data->port_id);
return NULL;
}

@ -1278,9 +1302,9 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[],
unsigned int i;

rte_atomic32_inc(&ind_tbl->refcnt);
DEBUG("port %u indirection table %p: refcnt %d",
dev->data->port_id, (void *)ind_tbl,
rte_atomic32_read(&ind_tbl->refcnt));
DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
dev->data->port_id, (void *)ind_tbl,
rte_atomic32_read(&ind_tbl->refcnt));
for (i = 0; i != ind_tbl->queues_n; ++i)
mlx5_rxq_get(dev, ind_tbl->queues[i]);
}
@ -1304,9 +1328,9 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
{
unsigned int i;

DEBUG("port %u indirection table %p: refcnt %d",
((struct priv *)dev->data->dev_private)->port,
(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
((struct priv *)dev->data->dev_private)->port,
(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
claim_zero(mlx5_glue->destroy_rwq_ind_table
(ind_tbl->ind_table));
@ -1337,8 +1361,9 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
int ret = 0;

LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
DEBUG("port %u Verbs indirection table %p still referenced",
dev->data->port_id, (void *)ind_tbl);
DRV_LOG(DEBUG,
"port %u Verbs indirection table %p still referenced",
dev->data->port_id, (void *)ind_tbl);
++ret;
}
return ret;
@ -1413,8 +1438,9 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
DEBUG("port %u hash Rx queue %p: refcnt %d", dev->data->port_id,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
dev->data->port_id, (void *)hrxq,
rte_atomic32_read(&hrxq->refcnt));
return hrxq;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
@ -1466,8 +1492,9 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
DEBUG("port %u hash Rx queue %p: refcnt %d", dev->data->port_id,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
dev->data->port_id, (void *)hrxq,
rte_atomic32_read(&hrxq->refcnt));
return hrxq;
}
return NULL;
@ -1487,9 +1514,9 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
int
mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
DEBUG("port %u hash Rx queue %p: refcnt %d",
((struct priv *)dev->data->dev_private)->port,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
((struct priv *)dev->data->dev_private)->port,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
@ -1518,8 +1545,9 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
int ret = 0;

LIST_FOREACH(hrxq, &priv->hrxqs, next) {
DEBUG("port %u Verbs hash Rx queue %p still referenced",
dev->data->port_id, (void *)hrxq);
DRV_LOG(DEBUG,
"port %u Verbs hash Rx queue %p still referenced",
dev->data->port_id, (void *)hrxq);
++ret;
}
return ret;

@ -377,9 +377,10 @@ check_cqe(volatile struct mlx5_cqe *cqe,
(syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
return 0;
if (!check_cqe_seen(cqe)) {
ERROR("unexpected CQE error %u (0x%02x)"
" syndrome 0x%02x",
op_code, op_code, syndrome);
DRV_LOG(ERR,
"unexpected CQE error %u (0x%02x) syndrome"
" 0x%02x",
op_code, op_code, syndrome);
rte_hexdump(stderr, "MLX5 Error CQE:",
(const void *)((uintptr_t)err_cqe),
sizeof(*err_cqe));
@ -388,8 +389,8 @@ check_cqe(volatile struct mlx5_cqe *cqe,
} else if ((op_code != MLX5_CQE_RESP_SEND) &&
(op_code != MLX5_CQE_REQ)) {
if (!check_cqe_seen(cqe)) {
ERROR("unexpected CQE opcode %u (0x%02x)",
op_code, op_code);
DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)",
op_code, op_code);
rte_hexdump(stderr, "MLX5 CQE:",
(const void *)((uintptr_t)cqe),
sizeof(*cqe));
@ -449,7 +450,7 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
(MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
if (!check_cqe_seen(cqe)) {
ERROR("unexpected error CQE, Tx stopped");
DRV_LOG(ERR, "unexpected error CQE, Tx stopped");
rte_hexdump(stderr, "MLX5 TXQ:",
(const void *)((uintptr_t)txq->wqes),
((1 << txq->wqe_n) *
@ -566,8 +567,8 @@ mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
} else {
struct rte_mempool *mp = mlx5_tx_mb2mp(mb);

WARN("failed to register mempool 0x%p(%s)",
(void *)mp, mp->name);
DRV_LOG(WARNING, "failed to register mempool 0x%p(%s)",
(void *)mp, mp->name);
}
return (uint32_t)-1;
}

@ -42,8 +42,8 @@ mlx5_socket_init(struct rte_eth_dev *dev)
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
rte_errno = errno;
WARN("port %u secondary process not supported: %s",
dev->data->port_id, strerror(errno));
DRV_LOG(WARNING, "port %u secondary process not supported: %s",
dev->data->port_id, strerror(errno));
goto error;
}
priv->primary_socket = ret;
@ -66,15 +66,17 @@ mlx5_socket_init(struct rte_eth_dev *dev)
sizeof(sun));
if (ret < 0) {
rte_errno = errno;
WARN("port %u cannot bind socket, secondary process not"
" supported: %s", dev->data->port_id, strerror(errno));
DRV_LOG(WARNING,
"port %u cannot bind socket, secondary process not"
" supported: %s",
dev->data->port_id, strerror(errno));
goto close;
}
ret = listen(priv->primary_socket, 0);
if (ret < 0) {
rte_errno = errno;
WARN("port %u secondary process not supported: %s",
dev->data->port_id, strerror(errno));
DRV_LOG(WARNING, "port %u secondary process not supported: %s",
dev->data->port_id, strerror(errno));
goto close;
}
return 0;
@ -133,29 +135,29 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
/* Accept the connection from the client. */
conn_sock = accept(priv->primary_socket, NULL, NULL);
if (conn_sock < 0) {
WARN("port %u connection failed: %s", dev->data->port_id,
strerror(errno));
DRV_LOG(WARNING, "port %u connection failed: %s",
dev->data->port_id, strerror(errno));
return;
}
ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1},
sizeof(int));
if (ret < 0) {
ret = errno;
WARN("port %u cannot change socket options: %s",
dev->data->port_id, strerror(rte_errno));
DRV_LOG(WARNING, "port %u cannot change socket options: %s",
dev->data->port_id, strerror(rte_errno));
goto error;
}
ret = recvmsg(conn_sock, &msg, MSG_WAITALL);
if (ret < 0) {
ret = errno;
WARN("port %u received an empty message: %s",
dev->data->port_id, strerror(rte_errno));
DRV_LOG(WARNING, "port %u received an empty message: %s",
dev->data->port_id, strerror(rte_errno));
goto error;
}
/* Expect to receive credentials only. */
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
WARN("port %u no message", dev->data->port_id);
DRV_LOG(WARNING, "port %u no message", dev->data->port_id);
goto error;
}
if ((cmsg->cmsg_type == SCM_CREDENTIALS) &&
@ -165,13 +167,15 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
}
cmsg = CMSG_NXTHDR(&msg, cmsg);
if (cmsg != NULL) {
WARN("port %u message wrongly formatted", dev->data->port_id);
DRV_LOG(WARNING, "port %u message wrongly formatted",
dev->data->port_id);
goto error;
}
/* Make sure all the ancillary data was received and valid. */
if ((cred == NULL) || (cred->uid != getuid()) ||
(cred->gid != getgid())) {
WARN("port %u wrong credentials", dev->data->port_id);
DRV_LOG(WARNING, "port %u wrong credentials",
dev->data->port_id);
goto error;
}
/* Set-up the ancillary data. */
@ -184,7 +188,8 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
*fd = priv->ctx->cmd_fd;
ret = sendmsg(conn_sock, &msg, 0);
if (ret < 0)
WARN("port %u cannot send response", dev->data->port_id);
DRV_LOG(WARNING, "port %u cannot send response",
dev->data->port_id);
error:
close(conn_sock);
}
@ -226,7 +231,8 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
rte_errno = errno;
WARN("port %u cannot connect to primary", dev->data->port_id);
DRV_LOG(WARNING, "port %u cannot connect to primary",
dev->data->port_id);
goto error;
}
socket_fd = ret;
@ -235,13 +241,15 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun));
if (ret < 0) {
rte_errno = errno;
WARN("port %u cannot connect to primary", dev->data->port_id);
DRV_LOG(WARNING, "port %u cannot connect to primary",
dev->data->port_id);
goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
rte_errno = EINVAL;
DEBUG("port %u cannot get first message", dev->data->port_id);
DRV_LOG(DEBUG, "port %u cannot get first message",
dev->data->port_id);
goto error;
}
cmsg->cmsg_level = SOL_SOCKET;
@ -250,7 +258,8 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
cred = (struct ucred *)CMSG_DATA(cmsg);
if (cred == NULL) {
rte_errno = EINVAL;
DEBUG("port %u no credentials received", dev->data->port_id);
DRV_LOG(DEBUG, "port %u no credentials received",
dev->data->port_id);
goto error;
}
cred->pid = getpid();
@ -259,27 +268,29 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT);
if (ret < 0) {
rte_errno = errno;
WARN("port %u cannot send credentials to primary: %s",
dev->data->port_id, strerror(errno));
DRV_LOG(WARNING,
"port %u cannot send credentials to primary: %s",
dev->data->port_id, strerror(errno));
goto error;
}
ret = recvmsg(socket_fd, &msg, MSG_WAITALL);
if (ret <= 0) {
rte_errno = errno;
WARN("port %u no message from primary: %s",
dev->data->port_id, strerror(errno));
DRV_LOG(WARNING, "port %u no message from primary: %s",
dev->data->port_id, strerror(errno));
goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
rte_errno = EINVAL;
WARN("port %u no file descriptor received", dev->data->port_id);
DRV_LOG(WARNING, "port %u no file descriptor received",
dev->data->port_id);
goto error;
}
fd = (int *)CMSG_DATA(cmsg);
if (*fd < 0) {
WARN("port %u no file descriptor received: %s",
dev->data->port_id, strerror(errno));
DRV_LOG(WARNING, "port %u no file descriptor received: %s",
dev->data->port_id, strerror(errno));
rte_errno = *fd;
goto error;
}

@ -148,8 +148,9 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
ifr.ifr_data = (caddr_t)et_stats;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
WARN("port %u unable to read statistic values from device",
dev->data->port_id);
DRV_LOG(WARNING,
"port %u unable to read statistic values from device",
dev->data->port_id);
return ret;
}
for (i = 0; i != xstats_n; ++i) {
@ -195,8 +196,8 @@ mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
ifr.ifr_data = (caddr_t)&drvinfo;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
WARN("port %u unable to query number of statistics",
dev->data->port_id);
DRV_LOG(WARNING, "port %u unable to query number of statistics",
dev->data->port_id);
return ret;
}
return drvinfo.n_stats;
@ -223,8 +224,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)

ret = mlx5_ethtool_get_stats_n(dev);
if (ret < 0) {
WARN("port %u no extended statistics available",
dev->data->port_id);
DRV_LOG(WARNING, "port %u no extended statistics available",
dev->data->port_id);
return;
}
dev_stats_n = ret;
@ -235,7 +236,7 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
rte_malloc("xstats_strings",
str_sz + sizeof(struct ethtool_gstrings), 0);
if (!strings) {
WARN("port %u unable to allocate memory for xstats",
DRV_LOG(WARNING, "port %u unable to allocate memory for xstats",
dev->data->port_id);
return;
}
@ -245,8 +246,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
ifr.ifr_data = (caddr_t)strings;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
WARN("port %u unable to get statistic names",
dev->data->port_id);
DRV_LOG(WARNING, "port %u unable to get statistic names",
dev->data->port_id);
goto free;
}
for (j = 0; j != xstats_n; ++j)
@ -267,9 +268,10 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
if (mlx5_counters_init[j].ib)
continue;
if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) {
WARN("port %u counter \"%s\" is not recognized",
dev->data->port_id,
mlx5_counters_init[j].dpdk_name);
DRV_LOG(WARNING,
"port %u counter \"%s\" is not recognized",
dev->data->port_id,
mlx5_counters_init[j].dpdk_name);
goto free;
}
}
@ -277,8 +279,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
assert(xstats_n <= MLX5_MAX_XSTATS);
ret = mlx5_read_dev_counters(dev, xstats_ctrl->base);
if (ret)
ERROR("port %u cannot read device counters: %s",
dev->data->port_id, strerror(rte_errno));
DRV_LOG(ERR, "port %u cannot read device counters: %s",
dev->data->port_id, strerror(rte_errno));
free:
rte_free(strings);
}
@ -445,16 +447,16 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)

stats_n = mlx5_ethtool_get_stats_n(dev);
if (stats_n < 0) {
ERROR("port %u cannot get stats: %s", dev->data->port_id,
strerror(-stats_n));
DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id,
strerror(-stats_n));
return;
}
if (xstats_ctrl->stats_n != stats_n)
mlx5_xstats_init(dev);
ret = mlx5_read_dev_counters(dev, counters);
if (ret) {
ERROR("port %u cannot read device counters: %s",
dev->data->port_id, strerror(rte_errno));
DRV_LOG(ERR, "port %u cannot read device counters: %s",
dev->data->port_id, strerror(rte_errno));
return;
}
for (i = 0; i != n; ++i)

@ -150,39 +150,39 @@ mlx5_dev_start(struct rte_eth_dev *dev)
dev->data->dev_started = 1;
ret = mlx5_flow_create_drop_queue(dev);
if (ret) {
ERROR("port %u drop queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
goto error;
}
DEBUG("port %u allocating and configuring hash Rx queues",
dev->data->port_id);
DRV_LOG(DEBUG, "port %u allocating and configuring hash Rx queues",
dev->data->port_id);
rte_mempool_walk(mlx5_mp2mr_iter, priv);
ret = mlx5_txq_start(dev);
if (ret) {
ERROR("port %u Tx queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
goto error;
}
ret = mlx5_rxq_start(dev);
if (ret) {
ERROR("port %u Rx queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
goto error;
}
ret = mlx5_rx_intr_vec_enable(dev);
if (ret) {
ERROR("port %u Rx interrupt vector creation failed",
dev->data->port_id);
DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
dev->data->port_id);
goto error;
}
mlx5_xstats_init(dev);
/* Update link status and Tx/Rx callbacks for the first time. */
memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
INFO("port %u forcing link to be up", dev->data->port_id);
DRV_LOG(INFO, "forcing port %u link to be up", dev->data->port_id);
ret = mlx5_force_link_status_change(dev, ETH_LINK_UP);
if (ret) {
DEBUG("failed to set port %u link to be up",
dev->data->port_id);
DRV_LOG(DEBUG, "failed to set port %u link to be up",
dev->data->port_id);
goto error;
}
mlx5_dev_interrupt_handler_install(dev);
@ -222,8 +222,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
dev->tx_pkt_burst = removed_tx_burst;
rte_wmb();
usleep(1000 * priv->rxqs_n);
DEBUG("port %u cleaning up and destroying hash Rx queues",
dev->data->port_id);
DRV_LOG(DEBUG, "port %u cleaning up and destroying hash Rx queues",
dev->data->port_id);
mlx5_flow_stop(dev, &priv->flows);
mlx5_traffic_disable(dev);
mlx5_rx_intr_vec_disable(dev);

@ -47,8 +47,8 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)

for (i = 0; (i != elts_n); ++i)
(*txq_ctrl->txq.elts)[i] = NULL;
DEBUG("port %u Tx queue %u allocated and configured %u WRs",
txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx, elts_n);
DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx, elts_n);
txq_ctrl->txq.elts_head = 0;
txq_ctrl->txq.elts_tail = 0;
txq_ctrl->txq.elts_comp = 0;
@ -69,8 +69,8 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
uint16_t elts_tail = txq_ctrl->txq.elts_tail;
struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;

DEBUG("port %u Tx queue %u freeing WRs",
txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx);
DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx);
txq_ctrl->txq.elts_head = 0;
txq_ctrl->txq.elts_tail = 0;
txq_ctrl->txq.elts_comp = 0;
@ -181,49 +181,53 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
!mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
rte_errno = ENOTSUP;
ERROR("port %u Tx queue offloads 0x%" PRIx64 " don't match"
" port offloads 0x%" PRIx64 " or supported offloads 0x%"
PRIx64,
dev->data->port_id, conf->offloads,
dev->data->dev_conf.txmode.offloads,
mlx5_get_tx_port_offloads(dev));
DRV_LOG(ERR,
"port %u Tx queue offloads 0x%" PRIx64 " don't match"
" port offloads 0x%" PRIx64 " or supported offloads 0x%"
PRIx64,
dev->data->port_id, conf->offloads,
dev->data->dev_conf.txmode.offloads,
mlx5_get_tx_port_offloads(dev));
return -rte_errno;
}
if (desc <= MLX5_TX_COMP_THRESH) {
WARN("port %u number of descriptors requested for Tx queue %u"
" must be higher than MLX5_TX_COMP_THRESH, using"
" %u instead of %u",
dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
DRV_LOG(WARNING,
"port %u number of descriptors requested for Tx queue"
" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
" instead of %u",
dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
desc = MLX5_TX_COMP_THRESH + 1;
}
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
WARN("port %u increased number of descriptors in Tx queue %u"
" to the next power of two (%d)",
dev->data->port_id, idx, desc);
DRV_LOG(WARNING,
"port %u increased number of descriptors in Tx queue"
" %u to the next power of two (%d)",
dev->data->port_id, idx, desc);
}
DEBUG("port %u configuring queue %u for %u descriptors",
dev->data->port_id, idx, desc);
DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
dev->data->port_id, idx, desc);
if (idx >= priv->txqs_n) {
ERROR("port %u Tx queue index out of range (%u >= %u)",
dev->data->port_id, idx, priv->txqs_n);
DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
dev->data->port_id, idx, priv->txqs_n);
rte_errno = EOVERFLOW;
return -rte_errno;
}
if (!mlx5_txq_releasable(dev, idx)) {
rte_errno = EBUSY;
ERROR("port %u unable to release queue index %u",
dev->data->port_id, idx);
DRV_LOG(ERR, "port %u unable to release queue index %u",
dev->data->port_id, idx);
return -rte_errno;
}
mlx5_txq_release(dev, idx);
txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
if (!txq_ctrl) {
ERROR("port %u unable to allocate queue index %u",
dev->data->port_id, idx);
DRV_LOG(ERR, "port %u unable to allocate queue index %u",
dev->data->port_id, idx);
return -rte_errno;
}
DEBUG("port %u adding Tx queue %u to list", dev->data->port_id, idx);
DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
dev->data->port_id, idx);
(*priv->txqs)[idx] = &txq_ctrl->txq;
return 0;
}
@ -249,8 +253,8 @@ mlx5_tx_queue_release(void *dpdk_txq)
for (i = 0; (i != priv->txqs_n); ++i)
if ((*priv->txqs)[i] == txq) {
mlx5_txq_release(priv->dev, i);
DEBUG("port %u removing Tx queue %u from list",
priv->dev->data->port_id, txq_ctrl->idx);
DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
priv->dev->data->port_id, txq_ctrl->idx);
break;
}
}
@ -321,9 +325,10 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
txq_ctrl->uar_mmap_offset);
if (ret != addr) {
/* fixed mmap have to return same address */
ERROR("port %u call to mmap failed on UAR for"
" txq %u", dev->data->port_id,
txq_ctrl->idx);
DRV_LOG(ERR,
"port %u call to mmap failed on UAR"
" for txq %u",
dev->data->port_id, txq_ctrl->idx);
rte_errno = ENXIO;
return -rte_errno;
}
@ -394,8 +399,9 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
priv->verbs_alloc_ctx.obj = txq_ctrl;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
ERROR("port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
dev->data->port_id);
DRV_LOG(ERR,
"port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
dev->data->port_id);
rte_errno = EINVAL;
return NULL;
}
@ -410,8 +416,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
ERROR("port %u Tx queue %u CQ creation failure",
dev->data->port_id, idx);
DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
@ -453,8 +459,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
}
tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
ERROR("port %u Tx queue %u QP creation failure",
dev->data->port_id, idx);
DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
@ -467,8 +473,9 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
(IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
ERROR("port %u Tx queue %u QP state to IBV_QPS_INIT failed",
dev->data->port_id, idx);
DRV_LOG(ERR,
"port %u Tx queue %u QP state to IBV_QPS_INIT failed",
dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
@ -477,24 +484,26 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
};
ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
ERROR("port %u Tx queue %u QP state to IBV_QPS_RTR failed",
dev->data->port_id, idx);
DRV_LOG(ERR,
"port %u Tx queue %u QP state to IBV_QPS_RTR failed",
dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
attr.mod.qp_state = IBV_QPS_RTS;
ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
ERROR("port %u Tx queue %u QP state to IBV_QPS_RTS failed",
dev->data->port_id, idx);
DRV_LOG(ERR,
"port %u Tx queue %u QP state to IBV_QPS_RTS failed",
dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
txq_ctrl->socket);
if (!txq_ibv) {
ERROR("port %u Tx queue %u cannot allocate memory",
dev->data->port_id, idx);
DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
dev->data->port_id, idx);
rte_errno = ENOMEM;
goto error;
}
@ -508,9 +517,10 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
goto error;
}
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
ERROR("port %u wrong MLX5_CQE_SIZE environment variable value: "
"it should be set to %u", dev->data->port_id,
RTE_CACHE_LINE_SIZE);
DRV_LOG(ERR,
"port %u wrong MLX5_CQE_SIZE environment variable"
" value: it should be set to %u",
dev->data->port_id, RTE_CACHE_LINE_SIZE);
rte_errno = EINVAL;
goto error;
}
@ -536,13 +546,15 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
} else {
ERROR("port %u failed to retrieve UAR info, invalid libmlx5.so",
dev->data->port_id);
DRV_LOG(ERR,
"port %u failed to retrieve UAR info, invalid"
" libmlx5.so",
dev->data->port_id);
rte_errno = EINVAL;
goto error;
}
DEBUG("port %u Verbs Tx queue %u: refcnt %d", dev->data->port_id, idx,
rte_atomic32_read(&txq_ibv->refcnt));
DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt));
LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
txq_ibv->txq_ctrl = txq_ctrl;
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@ -582,8 +594,8 @@ mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
if (txq_ctrl->ibv) {
rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
DEBUG("port %u Verbs Tx queue %u: refcnt %d",
dev->data->port_id, txq_ctrl->idx,
DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
dev->data->port_id, txq_ctrl->idx,
rte_atomic32_read(&txq_ctrl->ibv->refcnt));
}
return txq_ctrl->ibv;
@ -602,9 +614,9 @@ int
mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
{
assert(txq_ibv);
DEBUG("port %u Verbs Tx queue %u: refcnt %d",
txq_ibv->txq_ctrl->priv->dev->data->port_id,
txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
txq_ibv->txq_ctrl->priv->dev->data->port_id,
txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
@ -645,9 +657,8 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
struct mlx5_txq_ibv *txq_ibv;

LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
DEBUG("port %u Verbs Tx queue %u still referenced",
dev->data->port_id,
txq_ibv->txq_ctrl->idx);
DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
dev->data->port_id, txq_ibv->txq_ctrl->idx);
++ret;
}
return ret;
@ -738,9 +749,11 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)

max_inline = max_inline - (max_inline %
RTE_CACHE_LINE_SIZE);
WARN("port %u txq inline is too large (%d) setting it"
" to the maximum possible: %d\n",
priv->dev->data->port_id, txq_inline, max_inline);
DRV_LOG(WARNING,
"port %u txq inline is too large (%d) setting"
" it to the maximum possible: %d\n",
priv->dev->data->port_id, txq_inline,
max_inline);
txq_ctrl->txq.max_inline = max_inline /
RTE_CACHE_LINE_SIZE;
}
@ -794,16 +807,16 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->idx = idx;
txq_set_params(tmpl);
/* MRs will be registered in mp2mr[] later. */
DEBUG("port %u priv->device_attr.max_qp_wr is %d", dev->data->port_id,
priv->device_attr.orig_attr.max_qp_wr);
DEBUG("port %u priv->device_attr.max_sge is %d", dev->data->port_id,
priv->device_attr.orig_attr.max_sge);
DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
dev->data->port_id, priv->device_attr.orig_attr.max_sge);
tmpl->txq.elts =
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
DEBUG("port %u Tx queue %u: refcnt %d", dev->data->port_id,
idx, rte_atomic32_read(&tmpl->refcnt));
DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
@ -838,8 +851,9 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
ctrl->txq.mp2mr[i]->mp));
}
rte_atomic32_inc(&ctrl->refcnt);
DEBUG("port %u Tx queue %u refcnt %d", dev->data->port_id,
ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
dev->data->port_id,
ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
}
return ctrl;
}
@ -866,8 +880,8 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
if (!(*priv->txqs)[idx])
return 0;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
DEBUG("port %u Tx queue %u: refcnt %d", dev->data->port_id,
txq->idx, rte_atomic32_read(&txq->refcnt));
DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
txq->idx, rte_atomic32_read(&txq->refcnt));
if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
txq->ibv = NULL;
for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
@ -929,8 +943,8 @@ mlx5_txq_verify(struct rte_eth_dev *dev)
int ret = 0;

LIST_FOREACH(txq, &priv->txqsctrl, next) {
DEBUG("port %u Tx queue %u still referenced",
dev->data->port_id, txq->idx);
DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
dev->data->port_id, txq->idx);
++ret;
}
return ret;

@ -61,14 +61,21 @@ pmd_drv_log_basename(const char *s)
return s;
}

extern int mlx5_logtype;

#define PMD_DRV_LOG___(level, ...) \
rte_log(RTE_LOG_ ## level, \
mlx5_logtype, \
RTE_FMT(MLX5_DRIVER_NAME ": " \
RTE_FMT_HEAD(__VA_ARGS__,), \
RTE_FMT_TAIL(__VA_ARGS__,)))
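The macro above routes every driver message through rte_log() with the dedicated mlx5_logtype declared just before it. For context, a minimal sketch of how such a dynamic log type is typically registered at PMD load time; the constructor name and the "pmd.net.mlx5" token are illustrative assumptions, not part of this hunk:

/* Sketch only: register the dynamic log type when the PMD loads.
 * Constructor name and log type token are assumed for illustration. */
RTE_INIT(mlx5_pmd_log_init);
static void
mlx5_pmd_log_init(void)
{
	mlx5_logtype = rte_log_register("pmd.net.mlx5");
	if (mlx5_logtype >= 0)
		rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
}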

/*
* When debugging is enabled (NDEBUG not defined), file, line and function
* information replace the driver name (MLX5_DRIVER_NAME) in log messages.
*/
#ifndef NDEBUG

#define PMD_DRV_LOG___(level, ...) \
ERRNO_SAFE(RTE_LOG(level, PMD, __VA_ARGS__))
#define PMD_DRV_LOG__(level, ...) \
PMD_DRV_LOG___(level, "%s:%u: %s(): " __VA_ARGS__)
#define PMD_DRV_LOG_(level, s, ...) \
@ -80,9 +87,6 @@ pmd_drv_log_basename(const char *s)
__VA_ARGS__)

#else /* NDEBUG */

#define PMD_DRV_LOG___(level, ...) \
ERRNO_SAFE(RTE_LOG(level, PMD, MLX5_DRIVER_NAME ": " __VA_ARGS__))
#define PMD_DRV_LOG__(level, ...) \
PMD_DRV_LOG___(level, __VA_ARGS__)
#define PMD_DRV_LOG_(level, s, ...) \
@ -91,33 +95,24 @@ pmd_drv_log_basename(const char *s)
#endif /* NDEBUG */

/* Generic printf()-like logging macro with automatic line feed. */
#define PMD_DRV_LOG(level, ...) \
#define DRV_LOG(level, ...) \
PMD_DRV_LOG_(level, \
__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
PMD_DRV_LOG_CPAREN)
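With DRV_LOG() defined this way, a converted call site compiles down to rte_log() with the dynamic type, gaining the MLX5_DRIVER_NAME prefix and a trailing newline automatically. A hypothetical call site, shown for illustration only (the surrounding variables are assumed):

/* Illustration: in release builds this is equivalent to
 * rte_log(RTE_LOG_WARNING, mlx5_logtype,
 *         "net_mlx5: port %u link is down\n", ...). */
DRV_LOG(WARNING, "port %u link is down", dev->data->port_id);

Verbosity can then be raised or lowered per driver at runtime, typically through the EAL --log-level option with the registered log type name, instead of rebuilding with a different compile-time log level.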

/*
* Like assert(), DEBUG() becomes a no-op and claim_zero() does not perform
* any check when debugging is disabled.
*/
/* claim_zero() does not perform any check when debugging is disabled. */
#ifndef NDEBUG

#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__)
#define claim_zero(...) assert((__VA_ARGS__) == 0)
#define claim_nonzero(...) assert((__VA_ARGS__) != 0)

#else /* NDEBUG */

#define DEBUG(...) (void)0
#define claim_zero(...) (__VA_ARGS__)
#define claim_nonzero(...) (__VA_ARGS__)

#endif /* NDEBUG */
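claim_zero() keeps its dual behavior after this patch: the argument expression is always evaluated, but its result is only checked when NDEBUG is unset. A self-contained sketch of the same pattern, with standalone names assumed for illustration:

#include <assert.h>

#ifndef NDEBUG
#define claim_zero(...) assert((__VA_ARGS__) == 0)
#else
#define claim_zero(...) (__VA_ARGS__)
#endif

/* Hypothetical helper standing in for a destroy/cleanup call. */
static int cleanup(void) { return 0; }

int main(void)
{
	/* Evaluated in both builds; asserted only in debug builds. */
	claim_zero(cleanup());
	return 0;
}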

#define INFO(...) PMD_DRV_LOG(INFO, __VA_ARGS__)
#define WARN(...) PMD_DRV_LOG(WARNING, __VA_ARGS__)
#define ERROR(...) PMD_DRV_LOG(ERR, __VA_ARGS__)

/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
#define DATA_LEN(m) ((m)->data_len)

@ -45,8 +45,8 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
struct priv *priv = dev->data->dev_private;
unsigned int i;

DEBUG("port %u %s VLAN filter ID %" PRIu16,
dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
for (i = 0; (i != priv->vlan_filter_n); ++i)
if (priv->vlan_filter[i] == vlan_id)
@ -108,18 +108,18 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)

/* Validate hw support */
if (!priv->config.hw_vlan_strip) {
ERROR("port %u VLAN stripping is not supported",
dev->data->port_id);
DRV_LOG(ERR, "port %u VLAN stripping is not supported",
dev->data->port_id);
return;
}
/* Validate queue number */
if (queue >= priv->rxqs_n) {
ERROR("port %u VLAN stripping, invalid queue number %d",
dev->data->port_id, queue);
DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d",
dev->data->port_id, queue);
return;
}
DEBUG("port %u set VLAN offloads 0x%x for port %uqueue %d",
dev->data->port_id, vlan_offloads, rxq->port_id, queue);
DRV_LOG(DEBUG, "port %u set VLAN offloads 0x%x for port %uqueue %d",
dev->data->port_id, vlan_offloads, rxq->port_id, queue);
if (!rxq_ctrl->ibv) {
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
@ -132,8 +132,8 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
};
ret = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
if (ret) {
ERROR("port %u failed to modified stripping mode: %s",
dev->data->port_id, strerror(rte_errno));
DRV_LOG(ERR, "port %u failed to modified stripping mode: %s",
dev->data->port_id, strerror(rte_errno));
return;
}
/* Update related bits in RX queue. */
@ -162,8 +162,8 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
DEV_RX_OFFLOAD_VLAN_STRIP);

if (!priv->config.hw_vlan_strip) {
ERROR("port %u VLAN stripping is not supported",
dev->data->port_id);
DRV_LOG(ERR, "port %u VLAN stripping is not supported",
dev->data->port_id);
return 0;
}
/* Run on every RX queue and set/reset VLAN stripping. */