kni: update kernel logging
Switch to dynamic logging functions. Depending on the kernel configuration, this may cause previously visible logs to disappear. How to enable dynamic logging: https://www.kernel.org/doc/Documentation/dynamic-debug-howto.txt Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
This commit is contained in:
parent
05788ff054
commit
dd3e4e36d4
@ -25,6 +25,11 @@
|
||||
#ifndef _KNI_DEV_H_
|
||||
#define _KNI_DEV_H_
|
||||
|
||||
#ifdef pr_fmt
|
||||
#undef pr_fmt
|
||||
#endif
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/if.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/sched.h>
|
||||
@ -113,10 +118,8 @@ struct kni_dev {
|
||||
void *alloc_va[MBUF_BURST_SZ];
|
||||
};
|
||||
|
||||
#define KNI_ERR(args...) printk(KERN_DEBUG "KNI: Error: " args)
|
||||
#define KNI_PRINT(args...) printk(KERN_DEBUG "KNI: " args)
|
||||
#ifdef RTE_KNI_KO_DEBUG
|
||||
#define KNI_DBG(args...) printk(KERN_DEBUG "KNI: " args)
|
||||
#define KNI_DBG(args...) pr_debug(args)
|
||||
#else
|
||||
#define KNI_DBG(args...)
|
||||
#endif
|
||||
@ -153,13 +156,13 @@ int igb_kni_probe(struct pci_dev *pdev, struct net_device **lad_dev);
|
||||
void igb_kni_remove(struct pci_dev *pdev);
|
||||
|
||||
#ifdef RTE_KNI_VHOST_DEBUG_RX
|
||||
#define KNI_DBG_RX(args...) printk(KERN_DEBUG "KNI RX: " args)
|
||||
#define KNI_DBG_RX(args...) pr_debug(args)
|
||||
#else
|
||||
#define KNI_DBG_RX(args...)
|
||||
#endif
|
||||
|
||||
#ifdef RTE_KNI_VHOST_DEBUG_TX
|
||||
#define KNI_DBG_TX(args...) printk(KERN_DEBUG "KNI TX: " args)
|
||||
#define KNI_DBG_TX(args...) pr_debug(args)
|
||||
#else
|
||||
#define KNI_DBG_TX(args...)
|
||||
#endif
|
||||
|
@ -161,17 +161,17 @@ kni_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
KNI_PRINT("######## DPDK kni module loading ########\n");
|
||||
pr_debug("######## DPDK kni module loading ########\n");
|
||||
|
||||
if (kni_parse_kthread_mode() < 0) {
|
||||
KNI_ERR("Invalid parameter for kthread_mode\n");
|
||||
pr_err("Invalid parameter for kthread_mode\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (multiple_kthread_on == 0)
|
||||
KNI_PRINT("Single kernel thread for all KNI devices\n");
|
||||
pr_debug("Single kernel thread for all KNI devices\n");
|
||||
else
|
||||
KNI_PRINT("Multiple kernel thread mode enabled\n");
|
||||
pr_debug("Multiple kernel thread mode enabled\n");
|
||||
|
||||
#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
|
||||
rc = register_pernet_subsys(&kni_net_ops);
|
||||
@ -183,14 +183,14 @@ kni_init(void)
|
||||
|
||||
rc = misc_register(&kni_misc);
|
||||
if (rc != 0) {
|
||||
KNI_ERR("Misc registration failed\n");
|
||||
pr_err("Misc registration failed\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Configure the lo mode according to the input parameter */
|
||||
kni_net_config_lo_mode(lo_mode);
|
||||
|
||||
KNI_PRINT("######## DPDK kni module loaded ########\n");
|
||||
pr_debug("######## DPDK kni module loaded ########\n");
|
||||
|
||||
return 0;
|
||||
|
||||
@ -212,7 +212,7 @@ kni_exit(void)
|
||||
#else
|
||||
unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
|
||||
#endif
|
||||
KNI_PRINT("####### DPDK kni module unloaded #######\n");
|
||||
pr_debug("####### DPDK kni module unloaded #######\n");
|
||||
}
|
||||
|
||||
static int __init
|
||||
@ -242,7 +242,7 @@ kni_open(struct inode *inode, struct file *file)
|
||||
return -EBUSY;
|
||||
|
||||
file->private_data = get_net(net);
|
||||
KNI_PRINT("/dev/kni opened\n");
|
||||
pr_debug("/dev/kni opened\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -285,7 +285,7 @@ kni_release(struct inode *inode, struct file *file)
|
||||
clear_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use);
|
||||
|
||||
put_net(net);
|
||||
KNI_PRINT("/dev/kni closed\n");
|
||||
pr_debug("/dev/kni closed\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -373,7 +373,7 @@ kni_check_param(struct kni_dev *kni, struct rte_kni_device_info *dev)
|
||||
|
||||
/* Check if network name has been used */
|
||||
if (!strncmp(kni->name, dev->name, RTE_KNI_NAMESIZE)) {
|
||||
KNI_ERR("KNI name %s duplicated\n", dev->name);
|
||||
pr_err("KNI name %s duplicated\n", dev->name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -434,7 +434,7 @@ kni_ioctl_create(struct net *net,
|
||||
struct net_device *lad_dev = NULL;
|
||||
struct kni_dev *kni, *dev, *n;
|
||||
|
||||
printk(KERN_INFO "KNI: Creating kni...\n");
|
||||
pr_info("Creating kni...\n");
|
||||
/* Check the buffer size, to avoid warning */
|
||||
if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
|
||||
return -EINVAL;
|
||||
@ -442,7 +442,7 @@ kni_ioctl_create(struct net *net,
|
||||
/* Copy kni info from user space */
|
||||
ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
|
||||
if (ret) {
|
||||
KNI_ERR("copy_from_user in kni_ioctl_create");
|
||||
pr_err("copy_from_user in kni_ioctl_create");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
@ -450,7 +450,7 @@ kni_ioctl_create(struct net *net,
|
||||
* Check if the cpu core id is valid for binding.
|
||||
*/
|
||||
if (dev_info.force_bind && !cpu_online(dev_info.core_id)) {
|
||||
KNI_ERR("cpu %u is not online\n", dev_info.core_id);
|
||||
pr_err("cpu %u is not online\n", dev_info.core_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -470,7 +470,7 @@ kni_ioctl_create(struct net *net,
|
||||
#endif
|
||||
kni_net_init);
|
||||
if (net_dev == NULL) {
|
||||
KNI_ERR("error allocating device \"%s\"\n", dev_info.name);
|
||||
pr_err("error allocating device \"%s\"\n", dev_info.name);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
@ -500,19 +500,19 @@ kni_ioctl_create(struct net *net,
|
||||
#endif
|
||||
kni->mbuf_size = dev_info.mbuf_size;
|
||||
|
||||
KNI_PRINT("tx_phys: 0x%016llx, tx_q addr: 0x%p\n",
|
||||
pr_debug("tx_phys: 0x%016llx, tx_q addr: 0x%p\n",
|
||||
(unsigned long long) dev_info.tx_phys, kni->tx_q);
|
||||
KNI_PRINT("rx_phys: 0x%016llx, rx_q addr: 0x%p\n",
|
||||
pr_debug("rx_phys: 0x%016llx, rx_q addr: 0x%p\n",
|
||||
(unsigned long long) dev_info.rx_phys, kni->rx_q);
|
||||
KNI_PRINT("alloc_phys: 0x%016llx, alloc_q addr: 0x%p\n",
|
||||
pr_debug("alloc_phys: 0x%016llx, alloc_q addr: 0x%p\n",
|
||||
(unsigned long long) dev_info.alloc_phys, kni->alloc_q);
|
||||
KNI_PRINT("free_phys: 0x%016llx, free_q addr: 0x%p\n",
|
||||
pr_debug("free_phys: 0x%016llx, free_q addr: 0x%p\n",
|
||||
(unsigned long long) dev_info.free_phys, kni->free_q);
|
||||
KNI_PRINT("req_phys: 0x%016llx, req_q addr: 0x%p\n",
|
||||
pr_debug("req_phys: 0x%016llx, req_q addr: 0x%p\n",
|
||||
(unsigned long long) dev_info.req_phys, kni->req_q);
|
||||
KNI_PRINT("resp_phys: 0x%016llx, resp_q addr: 0x%p\n",
|
||||
pr_debug("resp_phys: 0x%016llx, resp_q addr: 0x%p\n",
|
||||
(unsigned long long) dev_info.resp_phys, kni->resp_q);
|
||||
KNI_PRINT("mbuf_size: %u\n", kni->mbuf_size);
|
||||
pr_debug("mbuf_size: %u\n", kni->mbuf_size);
|
||||
|
||||
KNI_DBG("PCI: %02x:%02x.%02x %04x:%04x\n",
|
||||
dev_info.bus,
|
||||
@ -525,7 +525,7 @@ kni_ioctl_create(struct net *net,
|
||||
|
||||
/* Support Ethtool */
|
||||
while (pci) {
|
||||
KNI_PRINT("pci_bus: %02x:%02x:%02x\n",
|
||||
pr_debug("pci_bus: %02x:%02x:%02x\n",
|
||||
pci->bus->number,
|
||||
PCI_SLOT(pci->devfn),
|
||||
PCI_FUNC(pci->devfn));
|
||||
@ -548,7 +548,7 @@ kni_ioctl_create(struct net *net,
|
||||
kni->lad_dev = lad_dev;
|
||||
kni_set_ethtool_ops(kni->net_dev);
|
||||
} else {
|
||||
KNI_ERR("Device not supported by ethtool");
|
||||
pr_err("Device not supported by ethtool");
|
||||
kni->lad_dev = NULL;
|
||||
}
|
||||
|
||||
@ -573,7 +573,7 @@ kni_ioctl_create(struct net *net,
|
||||
|
||||
ret = register_netdev(net_dev);
|
||||
if (ret) {
|
||||
KNI_ERR("error %i registering device \"%s\"\n",
|
||||
pr_err("error %i registering device \"%s\"\n",
|
||||
ret, dev_info.name);
|
||||
kni->net_dev = NULL;
|
||||
kni_dev_remove(kni);
|
||||
@ -610,7 +610,7 @@ kni_ioctl_release(struct net *net,
|
||||
|
||||
ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
|
||||
if (ret) {
|
||||
KNI_ERR("copy_from_user in kni_ioctl_release");
|
||||
pr_err("copy_from_user in kni_ioctl_release");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
@ -637,7 +637,7 @@ kni_ioctl_release(struct net *net,
|
||||
break;
|
||||
}
|
||||
up_write(&knet->kni_list_lock);
|
||||
printk(KERN_INFO "KNI: %s release kni named %s\n",
|
||||
pr_info("%s release kni named %s\n",
|
||||
(ret == 0 ? "Successfully" : "Unsuccessfully"), dev_info.name);
|
||||
|
||||
return ret;
|
||||
@ -680,7 +680,7 @@ kni_compat_ioctl(struct inode *inode,
|
||||
unsigned long ioctl_param)
|
||||
{
|
||||
/* 32 bits app on 64 bits OS to be supported later */
|
||||
KNI_PRINT("Not implemented.\n");
|
||||
pr_debug("Not implemented.\n");
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -189,7 +189,7 @@ kni_net_rx_normal(struct kni_dev *kni)
|
||||
|
||||
skb = dev_alloc_skb(len + 2);
|
||||
if (!skb) {
|
||||
KNI_ERR("Out of mem, dropping pkts\n");
|
||||
pr_err("Out of mem, dropping pkts\n");
|
||||
/* Update statistics */
|
||||
kni->stats.rx_dropped++;
|
||||
continue;
|
||||
@ -232,7 +232,7 @@ kni_net_rx_normal(struct kni_dev *kni)
|
||||
ret = kni_fifo_put(kni->free_q, kni->va, num_rx);
|
||||
if (ret != num_rx)
|
||||
/* Failing should not happen */
|
||||
KNI_ERR("Fail to enqueue entries into free_q\n");
|
||||
pr_err("Fail to enqueue entries into free_q\n");
|
||||
}
|
||||
|
||||
/*
|
||||
@ -303,14 +303,14 @@ kni_net_rx_lo_fifo(struct kni_dev *kni)
|
||||
ret = kni_fifo_put(kni->tx_q, kni->alloc_va, num);
|
||||
if (ret != num)
|
||||
/* Failing should not happen */
|
||||
KNI_ERR("Fail to enqueue mbufs into tx_q\n");
|
||||
pr_err("Fail to enqueue mbufs into tx_q\n");
|
||||
}
|
||||
|
||||
/* Burst enqueue mbufs into free_q */
|
||||
ret = kni_fifo_put(kni->free_q, kni->va, num);
|
||||
if (ret != num)
|
||||
/* Failing should not happen */
|
||||
KNI_ERR("Fail to enqueue mbufs into free_q\n");
|
||||
pr_err("Fail to enqueue mbufs into free_q\n");
|
||||
|
||||
/**
|
||||
* Update statistic, and enqueue/dequeue failure is impossible,
|
||||
@ -362,7 +362,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
|
||||
|
||||
skb = dev_alloc_skb(len + 2);
|
||||
if (skb == NULL)
|
||||
KNI_ERR("Out of mem, dropping pkts\n");
|
||||
pr_err("Out of mem, dropping pkts\n");
|
||||
else {
|
||||
/* Align IP on 16B boundary */
|
||||
skb_reserve(skb, 2);
|
||||
@ -375,7 +375,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
|
||||
/* Simulate real usage, allocate/copy skb twice */
|
||||
skb = dev_alloc_skb(len + 2);
|
||||
if (skb == NULL) {
|
||||
KNI_ERR("Out of mem, dropping pkts\n");
|
||||
pr_err("Out of mem, dropping pkts\n");
|
||||
kni->stats.rx_dropped++;
|
||||
continue;
|
||||
}
|
||||
@ -415,7 +415,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
|
||||
ret = kni_fifo_put(kni->free_q, kni->va, num);
|
||||
if (ret != num)
|
||||
/* Failing should not happen */
|
||||
KNI_ERR("Fail to enqueue mbufs into free_q\n");
|
||||
pr_err("Fail to enqueue mbufs into free_q\n");
|
||||
}
|
||||
|
||||
/* rx interface */
|
||||
@ -500,12 +500,12 @@ kni_net_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
ret = kni_fifo_put(kni->tx_q, &pkt_va, 1);
|
||||
if (unlikely(ret != 1)) {
|
||||
/* Failing should not happen */
|
||||
KNI_ERR("Fail to enqueue mbuf into tx_q\n");
|
||||
pr_err("Fail to enqueue mbuf into tx_q\n");
|
||||
goto drop;
|
||||
}
|
||||
} else {
|
||||
/* Failing should not happen */
|
||||
KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
|
||||
pr_err("Fail to dequeue mbuf from alloc_q\n");
|
||||
goto drop;
|
||||
}
|
||||
|
||||
@ -598,7 +598,7 @@ kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
|
||||
int ret_val;
|
||||
|
||||
if (!kni || !req) {
|
||||
KNI_ERR("No kni instance or request\n");
|
||||
pr_err("No kni instance or request\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -608,7 +608,7 @@ kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
|
||||
memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
|
||||
num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
|
||||
if (num < 1) {
|
||||
KNI_ERR("Cannot send to req_q\n");
|
||||
pr_err("Cannot send to req_q\n");
|
||||
ret = -EBUSY;
|
||||
goto fail;
|
||||
}
|
||||
@ -622,7 +622,7 @@ kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
|
||||
num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
|
||||
if (num != 1 || resp_va != kni->sync_va) {
|
||||
/* This should never happen */
|
||||
KNI_ERR("No data in resp_q\n");
|
||||
pr_err("No data in resp_q\n");
|
||||
ret = -ENODATA;
|
||||
goto fail;
|
||||
}
|
||||
@ -754,18 +754,18 @@ void
|
||||
kni_net_config_lo_mode(char *lo_str)
|
||||
{
|
||||
if (!lo_str) {
|
||||
KNI_PRINT("loopback disabled");
|
||||
pr_debug("loopback disabled");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!strcmp(lo_str, "lo_mode_none"))
|
||||
KNI_PRINT("loopback disabled");
|
||||
pr_debug("loopback disabled");
|
||||
else if (!strcmp(lo_str, "lo_mode_fifo")) {
|
||||
KNI_PRINT("loopback mode=lo_mode_fifo enabled");
|
||||
pr_debug("loopback mode=lo_mode_fifo enabled");
|
||||
kni_net_rx_func = kni_net_rx_lo_fifo;
|
||||
} else if (!strcmp(lo_str, "lo_mode_fifo_skb")) {
|
||||
KNI_PRINT("loopback mode=lo_mode_fifo_skb enabled");
|
||||
pr_debug("loopback mode=lo_mode_fifo_skb enabled");
|
||||
kni_net_rx_func = kni_net_rx_lo_fifo_skb;
|
||||
} else
|
||||
KNI_PRINT("Incognizant parameter, loopback disabled");
|
||||
pr_debug("Incognizant parameter, loopback disabled");
|
||||
}
|
||||
|
@ -121,12 +121,12 @@ kni_vhost_net_tx(struct kni_dev *kni, struct msghdr *m,
|
||||
ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
|
||||
if (unlikely(ret != 1)) {
|
||||
/* Failing should not happen */
|
||||
KNI_ERR("Fail to enqueue mbuf into tx_q\n");
|
||||
pr_err("Fail to enqueue mbuf into tx_q\n");
|
||||
goto drop;
|
||||
}
|
||||
} else {
|
||||
/* Failing should not happen */
|
||||
KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
|
||||
pr_err("Fail to dequeue mbuf from alloc_q\n");
|
||||
goto drop;
|
||||
}
|
||||
|
||||
@ -171,7 +171,7 @@ kni_vhost_net_rx(struct kni_dev *kni, struct msghdr *m,
|
||||
skb->data = NULL;
|
||||
if (unlikely(kni_fifo_put(q->fifo, (void **)&skb, 1) != 1))
|
||||
/* Failing should not happen */
|
||||
KNI_ERR("Fail to enqueue entries into rx cache fifo\n");
|
||||
pr_err("Fail to enqueue entries into rx cache fifo\n");
|
||||
|
||||
pkt_len = kva->data_len;
|
||||
if (unlikely(pkt_len > len))
|
||||
@ -200,7 +200,7 @@ kni_vhost_net_rx(struct kni_dev *kni, struct msghdr *m,
|
||||
va = (void *)kva - kni->mbuf_kva + kni->mbuf_va;
|
||||
if (unlikely(kni_fifo_put(kni->free_q, (void **)&va, 1) != 1))
|
||||
/* Failing should not happen */
|
||||
KNI_ERR("Fail to enqueue entries into free_q\n");
|
||||
pr_err("Fail to enqueue entries into free_q\n");
|
||||
|
||||
KNI_DBG_RX("receive done %d\n", pkt_len);
|
||||
|
||||
@ -340,7 +340,7 @@ kni_chk_vhost_rx(struct kni_dev *kni)
|
||||
|
||||
except:
|
||||
/* Failing should not happen */
|
||||
KNI_ERR("Fail to enqueue fifo, it shouldn't happen\n");
|
||||
pr_err("Fail to enqueue fifo, it shouldn't happen\n");
|
||||
BUG_ON(1);
|
||||
|
||||
return 0;
|
||||
@ -546,7 +546,7 @@ kni_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
/* 32 bits app on 64 bits OS to be supported later */
|
||||
KNI_PRINT("Not implemented.\n");
|
||||
pr_debug("Not implemented.\n");
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user