vhost: rename device id variable

For a long while I failed to figure out what "fh" means here. The only
guess I could come up with was "file handle", which should tell you it
is not well named.

I then found out that "fh" is indeed derived from the FUSE library, so
that guess was right. However, device_fh actually represents a virtio-net
device ID. Therefore, rename it to vid (Virtio-net device ID, or Vhost
device ID, whichever you prefer) to make it easier to understand.

This name (vid) will later be the only handle exposed to applications.
That is another reason for the rename: it is our interface, so it should
be as easy to understand as possible.
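
As a minimal sketch of what the rename amounts to (field names are taken
from the diff below; the real structures in lib/librte_vhost and the
vhost examples carry many more members, elided here):

    #include <sys/types.h>   /* for pid_t */

    /* Internal vhost device structure: "device_fh" becomes "vid". */
    struct virtio_net {
        int vid;             /**< device identifier (was: device_fh) */
        /* ... negotiated features, virtqueues, ifname, flags, ... */
    };

    /* Per-request context: the FUSE-derived "fh" likewise becomes "vid". */
    struct vhost_device_ctx {
        pid_t pid;           /* PID of the process calling the IOCTL */
        int vid;             /* Virtio-net device ID (was: fh) */
    };

In the CUSE transport fi->fh still carries the numeric handle returned by
vhost_new_device(); fuse_req_to_vhost_ctx() simply casts it to int and
stores it as ctx.vid, as the corresponding hunk below shows.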

Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Rich Lane <rich.lane@bigswitch.com>
Acked-by: Rich Lane <rich.lane@bigswitch.com>

Author: Yuanhan Liu
Date:   2016-05-23 16:36:33 +08:00
Commit: e2a1dd1275
Parent: 7f262239ab
12 changed files with 111 additions and 111 deletions

View File

@ -569,7 +569,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
struct virtio_net *dev = vdev->dev;
RTE_LOG(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
dev->device_fh);
dev->vid);
/* Add packet to the port tx queue */
tx_q = &lcore_tx_queue[lcore_id];
@ -578,8 +578,8 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
tx_q->m_table[len] = m;
len++;
if (enable_stats) {
dev_statistics[dev->device_fh].tx_total++;
dev_statistics[dev->device_fh].tx++;
dev_statistics[dev->vid].tx_total++;
dev_statistics[dev->vid].tx++;
}
if (unlikely(len == MAX_PKT_BURST)) {
@ -721,10 +721,10 @@ switch_worker(__rte_unused void *arg)
ret_count = overlay_options.rx_handle(dev, pkts_burst, rx_count);
if (enable_stats) {
rte_atomic64_add(
&dev_statistics[dev->device_fh].rx_total_atomic,
&dev_statistics[dev->vid].rx_total_atomic,
rx_count);
rte_atomic64_add(
&dev_statistics[dev->device_fh].rx_atomic, ret_count);
&dev_statistics[dev->vid].rx_atomic, ret_count);
}
while (likely(rx_count)) {
rx_count--;
@ -945,7 +945,7 @@ destroy_device(volatile struct virtio_net *dev)
if (ll_lcore_dev_cur == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) Failed to find the dev to be destroy.\n",
dev->device_fh);
dev->vid);
return;
}
@ -993,7 +993,7 @@ destroy_device(volatile struct virtio_net *dev)
lcore_info[vdev->coreid].lcore_ll->device_num--;
RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been removed "
"from data core\n", dev->device_fh);
"from data core\n", dev->vid);
rte_free(vdev);
@ -1015,7 +1015,7 @@ new_device(struct virtio_net *dev)
if (vdev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
"(%d) Couldn't allocate memory for vhost dev\n",
dev->device_fh);
dev->vid);
return -1;
}
vdev->dev = dev;
@ -1025,7 +1025,7 @@ new_device(struct virtio_net *dev)
if (ll_dev == NULL) {
RTE_LOG(INFO, VHOST_DATA, "(%d) No free entry found in"
" linked list Device limit of %d devices per core"
" has been reached\n", dev->device_fh, nb_devices);
" has been reached\n", dev->vid, nb_devices);
if (vdev->regions_hpa)
rte_free(vdev->regions_hpa);
rte_free(vdev);
@ -1033,7 +1033,7 @@ new_device(struct virtio_net *dev)
}
ll_dev->vdev = vdev;
add_data_ll_entry(&ll_root_used, ll_dev);
vdev->rx_q = dev->device_fh;
vdev->rx_q = dev->vid;
/* reset ready flag */
vdev->ready = DEVICE_MAC_LEARNING;
@ -1051,7 +1051,7 @@ new_device(struct virtio_net *dev)
if (ll_dev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
"(%d) Failed to add device to data core\n",
dev->device_fh);
dev->vid);
vdev->ready = DEVICE_SAFE_REMOVE;
destroy_device(dev);
rte_free(vdev->regions_hpa);
@ -1065,7 +1065,7 @@ new_device(struct virtio_net *dev)
ll_dev);
/* Initialize device stats */
memset(&dev_statistics[dev->device_fh], 0,
memset(&dev_statistics[dev->vid], 0,
sizeof(struct device_statistics));
/* Disable notifications. */
@ -1075,7 +1075,7 @@ new_device(struct virtio_net *dev)
dev->flags |= VIRTIO_DEV_RUNNING;
RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been added to data core %d\n",
dev->device_fh, vdev->coreid);
dev->vid, vdev->coreid);
return 0;
}
@ -1099,7 +1099,7 @@ print_stats(void)
struct virtio_net_data_ll *dev_ll;
uint64_t tx_dropped, rx_dropped;
uint64_t tx, tx_total, rx, rx_total, rx_ip_csum, rx_l4_csum;
uint32_t device_fh;
int vid;
const char clr[] = { 27, '[', '2', 'J', '\0' };
const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
@ -1113,22 +1113,22 @@ print_stats(void)
dev_ll = ll_root_used;
while (dev_ll != NULL) {
device_fh = (uint32_t)dev_ll->vdev->dev->device_fh;
tx_total = dev_statistics[device_fh].tx_total;
tx = dev_statistics[device_fh].tx;
vid = dev_ll->vdev->dev->vid;
tx_total = dev_statistics[vid].tx_total;
tx = dev_statistics[vid].tx;
tx_dropped = tx_total - tx;
rx_total = rte_atomic64_read(
&dev_statistics[device_fh].rx_total_atomic);
&dev_statistics[vid].rx_total_atomic);
rx = rte_atomic64_read(
&dev_statistics[device_fh].rx_atomic);
&dev_statistics[vid].rx_atomic);
rx_dropped = rx_total - rx;
rx_ip_csum = rte_atomic64_read(
&dev_statistics[device_fh].rx_bad_ip_csum);
&dev_statistics[vid].rx_bad_ip_csum);
rx_l4_csum = rte_atomic64_read(
&dev_statistics[device_fh].rx_bad_l4_csum);
&dev_statistics[vid].rx_bad_l4_csum);
printf("\nStatistics for device %"PRIu32" ----------"
printf("\nStatistics for device %d ----------"
"\nTX total: %"PRIu64""
"\nTX dropped: %"PRIu64""
"\nTX successful: %"PRIu64""
@ -1137,7 +1137,7 @@ print_stats(void)
"\nRX bad L4 csum: %"PRIu64""
"\nRX dropped: %"PRIu64""
"\nRX successful: %"PRIu64"",
device_fh,
vid,
tx_total,
tx_dropped,
tx,

View File

@ -245,7 +245,7 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
int i, ret;
struct ether_hdr *pkt_hdr;
struct virtio_net *dev = vdev->dev;
uint64_t portid = dev->device_fh;
uint64_t portid = dev->vid;
struct ipv4_hdr *ip;
struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
@ -254,7 +254,7 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
RTE_LOG(INFO, VHOST_DATA,
"(%d) WARNING: Not configuring device,"
"as already have %d ports for VXLAN.",
dev->device_fh, VXLAN_N_PORTS);
dev->vid, VXLAN_N_PORTS);
return -1;
}
@ -264,7 +264,7 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
RTE_LOG(INFO, VHOST_DATA,
"(%d) WARNING: This device is using an existing"
" MAC address and has not been registered.\n",
dev->device_fh);
dev->vid);
return -1;
}
@ -436,11 +436,11 @@ vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
for (i = 0; i < rx_count; i++) {
if (enable_stats) {
rte_atomic64_add(
&dev_statistics[dev->device_fh].rx_bad_ip_csum,
&dev_statistics[dev->vid].rx_bad_ip_csum,
(pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
!= 0);
rte_atomic64_add(
&dev_statistics[dev->device_fh].rx_bad_ip_csum,
&dev_statistics[dev->vid].rx_bad_ip_csum,
(pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
!= 0);
}

View File

@ -716,7 +716,7 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
if (find_vhost_dev(&pkt_hdr->s_addr)) {
RTE_LOG(ERR, VHOST_DATA,
"(%d) device is using a registered MAC!\n",
vdev->device_fh);
vdev->vid);
return -1;
}
@ -724,12 +724,12 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
/* vlan_tag currently uses the device_id. */
vdev->vlan_tag = vlan_tags[vdev->device_fh];
vdev->vlan_tag = vlan_tags[vdev->vid];
/* Print out VMDQ registration info. */
RTE_LOG(INFO, VHOST_DATA,
"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
vdev->device_fh,
vdev->vid,
vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
@ -737,11 +737,11 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
/* Register the MAC address. */
ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
(uint32_t)vdev->device_fh + vmdq_pool_base);
(uint32_t)vdev->vid + vmdq_pool_base);
if (ret)
RTE_LOG(ERR, VHOST_DATA,
"(%d) failed to add device MAC address to VMDQ\n",
vdev->device_fh);
vdev->vid);
/* Enable stripping of the vlan tag as we handle routing. */
if (vlan_strip)
@ -820,19 +820,19 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
if (!dst_vdev)
return -1;
if (vdev->device_fh == dst_vdev->device_fh) {
if (vdev->vid == dst_vdev->vid) {
RTE_LOG(DEBUG, VHOST_DATA,
"(%d) TX: src and dst MAC is same. Dropping packet.\n",
vdev->device_fh);
vdev->vid);
return 0;
}
RTE_LOG(DEBUG, VHOST_DATA,
"(%d) TX: MAC address is local\n", dst_vdev->device_fh);
"(%d) TX: MAC address is local\n", dst_vdev->vid);
if (unlikely(dst_vdev->remove)) {
RTE_LOG(DEBUG, VHOST_DATA,
"(%d) device is marked for removal\n", dst_vdev->device_fh);
"(%d) device is marked for removal\n", dst_vdev->vid);
return 0;
}
@ -855,10 +855,10 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
if (!dst_vdev)
return 0;
if (vdev->device_fh == dst_vdev->device_fh) {
if (vdev->vid == dst_vdev->vid) {
RTE_LOG(DEBUG, VHOST_DATA,
"(%d) TX: src and dst MAC is same. Dropping packet.\n",
vdev->device_fh);
vdev->vid);
return -1;
}
@ -868,11 +868,11 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
* the packet length by plus it.
*/
*offset = VLAN_HLEN;
*vlan_tag = vlan_tags[vdev->device_fh];
*vlan_tag = vlan_tags[vdev->vid];
RTE_LOG(DEBUG, VHOST_DATA,
"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
vdev->device_fh, dst_vdev->device_fh, *vlan_tag);
vdev->vid, dst_vdev->vid, *vlan_tag);
return 0;
}
@ -963,7 +963,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
}
RTE_LOG(DEBUG, VHOST_DATA,
"(%d) TX: MAC address is external\n", vdev->device_fh);
"(%d) TX: MAC address is external\n", vdev->vid);
queue2nic:
@ -1094,7 +1094,7 @@ drain_virtio_tx(struct vhost_dev *vdev)
}
for (i = 0; i < count; ++i)
virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->device_fh]);
virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
/*
@ -1206,7 +1206,7 @@ destroy_device (volatile struct virtio_net *dev)
RTE_LOG(INFO, VHOST_DATA,
"(%d) device has been removed from data core\n",
vdev->device_fh);
vdev->vid);
rte_free(vdev);
}
@ -1221,21 +1221,21 @@ new_device (struct virtio_net *dev)
int lcore, core_add = 0;
uint32_t device_num_min = num_devices;
struct vhost_dev *vdev;
int device_fh = dev->device_fh;
int vid = dev->vid;
vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
if (vdev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
"(%d) couldn't allocate memory for vhost dev\n",
device_fh);
vid);
return -1;
}
vdev->dev = dev;
dev->priv = vdev;
vdev->device_fh = device_fh;
vdev->vid = vid;
TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
vdev->vmdq_rx_q = device_fh * queues_per_pool + vmdq_queue_base;
vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
/*reset ready flag*/
vdev->ready = DEVICE_MAC_LEARNING;
@ -1260,7 +1260,7 @@ new_device (struct virtio_net *dev)
RTE_LOG(INFO, VHOST_DATA,
"(%d) device has been added to data core %d\n",
device_fh, vdev->coreid);
vid, vdev->coreid);
return 0;
}
@ -1312,7 +1312,7 @@ print_stats(void)
"RX total: %" PRIu64 "\n"
"RX dropped: %" PRIu64 "\n"
"RX successful: %" PRIu64 "\n",
vdev->dev->device_fh,
vdev->dev->vid,
tx_total, tx_dropped, tx,
rx_total, rx_dropped, rx);
}

View File

@ -66,7 +66,7 @@ struct vhost_dev {
/**< Device is marked for removal from the data core. */
volatile uint8_t remove;
int device_fh;
int vid;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
TAILQ_ENTRY(vhost_dev) lcore_vdev_entry;

View File

@ -133,7 +133,7 @@ struct virtio_net {
struct virtio_memory *mem; /**< QEMU memory and memory region information. */
uint64_t features; /**< Negotiated feature set. */
uint64_t protocol_features; /**< Negotiated protocol feature set. */
int device_fh; /**< device identifier. */
int vid; /**< device identifier. */
uint32_t flags; /**< Device flags. Only used to check if device is running on data core. */
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
char ifname[IF_NAME_SZ]; /**< Name of the tap device or socket path. */

View File

@ -57,9 +57,9 @@
char packet[VHOST_MAX_PRINT_BUFF]; \
\
if ((header)) \
snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->device_fh), (size)); \
snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
else \
snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->device_fh), (size)); \
snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
for (index = 0; index < (size); index++) { \
snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
"%02hhx ", pkt_addr[index]); \
@ -80,7 +80,7 @@
*/
struct vhost_device_ctx {
pid_t pid; /* PID of process calling the IOCTL. */
int fh; /* Populated with fi->fh to track the device index. */
int vid; /* Virtio-net device ID */
};
int vhost_new_device(struct vhost_device_ctx);

View File

@ -70,7 +70,7 @@ fuse_req_to_vhost_ctx(fuse_req_t req, struct fuse_file_info *fi)
struct fuse_ctx const *const req_ctx = fuse_req_ctx(req);
ctx.pid = req_ctx->pid;
ctx.fh = fi->fh;
ctx.vid = (int)fi->fh;
return ctx;
}
@ -94,7 +94,7 @@ vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)
fi->fh = err;
RTE_LOG(INFO, VHOST_CONFIG,
"(%d) device configuration started\n", fi->fh);
"(%d) device configuration started\n", err);
fuse_reply_open(req, fi);
}
@ -108,7 +108,7 @@ vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
vhost_destroy_device(ctx);
RTE_LOG(INFO, VHOST_CONFIG, "(%d) device released\n", ctx.fh);
RTE_LOG(INFO, VHOST_CONFIG, "(%d) device released\n", ctx.vid);
fuse_reply_err(req, err);
}
@ -194,7 +194,7 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
switch (cmd) {
case VHOST_NET_SET_BACKEND:
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
"(%d) IOCTL: VHOST_NET_SET_BACKEND\n", ctx.vid);
if (!in_buf) {
VHOST_IOCTL_RETRY(sizeof(file), 0);
break;
@ -206,32 +206,32 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
case VHOST_GET_FEATURES:
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
"(%d) IOCTL: VHOST_GET_FEATURES\n", ctx.vid);
VHOST_IOCTL_W(uint64_t, features, vhost_get_features);
break;
case VHOST_SET_FEATURES:
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
"(%d) IOCTL: VHOST_SET_FEATURES\n", ctx.vid);
VHOST_IOCTL_R(uint64_t, features, vhost_set_features);
break;
case VHOST_RESET_OWNER:
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
"(%d) IOCTL: VHOST_RESET_OWNER\n", ctx.vid);
VHOST_IOCTL(vhost_reset_owner);
break;
case VHOST_SET_OWNER:
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_SET_OWNER\n", ctx.fh);
"(%d) IOCTL: VHOST_SET_OWNER\n", ctx.vid);
VHOST_IOCTL(vhost_set_owner);
break;
case VHOST_SET_MEM_TABLE:
/*TODO fix race condition.*/
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
"(%d) IOCTL: VHOST_SET_MEM_TABLE\n", ctx.vid);
static struct vhost_memory mem_temp;
switch (in_bufsz) {
@ -264,28 +264,28 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
case VHOST_SET_VRING_NUM:
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
"(%d) IOCTL: VHOST_SET_VRING_NUM\n", ctx.vid);
VHOST_IOCTL_R(struct vhost_vring_state, state,
vhost_set_vring_num);
break;
case VHOST_SET_VRING_BASE:
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
"(%d) IOCTL: VHOST_SET_VRING_BASE\n", ctx.vid);
VHOST_IOCTL_R(struct vhost_vring_state, state,
vhost_set_vring_base);
break;
case VHOST_GET_VRING_BASE:
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
"(%d) IOCTL: VHOST_GET_VRING_BASE\n", ctx.vid);
VHOST_IOCTL_RW(uint32_t, index,
struct vhost_vring_state, state, vhost_get_vring_base);
break;
case VHOST_SET_VRING_ADDR:
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
"(%d) IOCTL: VHOST_SET_VRING_ADDR\n", ctx.vid);
VHOST_IOCTL_R(struct vhost_vring_addr, addr,
vhost_set_vring_addr);
break;
@ -295,11 +295,11 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
if (cmd == VHOST_SET_VRING_KICK)
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_SET_VRING_KICK\n",
ctx.fh);
ctx.vid);
else
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: VHOST_SET_VRING_CALL\n",
ctx.fh);
ctx.vid);
if (!in_buf)
VHOST_IOCTL_RETRY(sizeof(struct vhost_vring_file), 0);
else {
@ -326,17 +326,17 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
default:
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) IOCTL: DOESN NOT EXIST\n", ctx.fh);
"(%d) IOCTL: DOESN NOT EXIST\n", ctx.vid);
result = -1;
fuse_reply_ioctl(req, result, NULL, 0);
}
if (result < 0)
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: FAIL\n", ctx.fh);
"(%d) IOCTL: FAIL\n", ctx.vid);
else
LOG_DEBUG(VHOST_CONFIG,
"(%d) IOCTL: SUCCESS\n", ctx.fh);
"(%d) IOCTL: SUCCESS\n", ctx.vid);
}
/*

View File

@ -290,7 +290,7 @@ cuse_set_mem_table(struct vhost_device_ctx ctx,
if (dev->mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory for dev->mem\n",
dev->device_fh);
dev->vid);
return -1;
}
@ -394,7 +394,7 @@ get_ifname(struct vhost_device_ctx ctx, struct virtio_net *dev, int tap_fd, int
if (close(fd_tap) < 0)
RTE_LOG(ERR, VHOST_CONFIG, "(%d) fd close failed\n",
dev->device_fh);
dev->vid);
if (ret >= 0) {
ifr_size = strnlen(ifr.ifr_name, sizeof(ifr.ifr_name));
@ -402,7 +402,7 @@ get_ifname(struct vhost_device_ctx ctx, struct virtio_net *dev, int tap_fd, int
} else
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) TUNGETIFF ioctl failed\n",
dev->device_fh);
dev->vid);
return 0;
}

View File

@ -264,10 +264,10 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
uint16_t desc_indexes[MAX_PKT_BURST];
uint32_t i;
LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->device_fh, __func__);
LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
dev->device_fh, __func__, queue_id);
dev->vid, __func__, queue_id);
return 0;
}
@ -280,7 +280,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
return 0;
LOG_DEBUG(VHOST_DATA, "(%d) res_start_idx %d | res_end_idx Index %d\n",
dev->device_fh, res_start_idx, res_end_idx);
dev->vid, res_start_idx, res_end_idx);
/* Retrieve all of the desc indexes first to avoid caching issues. */
rte_prefetch0(&vq->avail->ring[res_start_idx & (vq->size - 1)]);
@ -442,7 +442,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
return 0;
LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
dev->device_fh, cur_idx, res_end_idx);
dev->vid, cur_idx, res_end_idx);
if (vq->buf_vec[vec_idx].buf_len < vq->vhost_hlen)
return -1;
@ -452,7 +452,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
virtio_hdr.num_buffers = res_end_idx - res_start_idx;
LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
dev->device_fh, virtio_hdr.num_buffers);
dev->vid, virtio_hdr.num_buffers);
virtio_enqueue_offload(m, &virtio_hdr.hdr);
copy_virtio_net_hdr(vq, desc_addr, virtio_hdr);
@ -530,10 +530,10 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
uint32_t pkt_idx = 0, nr_used = 0;
uint16_t start, end;
LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->device_fh, __func__);
LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
dev->device_fh, __func__, queue_id);
dev->vid, __func__, queue_id);
return 0;
}
@ -552,7 +552,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
&start, &end) < 0)) {
LOG_DEBUG(VHOST_DATA,
"(%d) failed to get enough desc from vring\n",
dev->device_fh);
dev->vid);
break;
}
@ -828,7 +828,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
dev->device_fh, __func__, queue_id);
dev->vid, __func__, queue_id);
return 0;
}
@ -864,7 +864,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
if (free_entries == 0)
goto out;
LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->device_fh, __func__);
LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
/* Prefetch available ring to retrieve head indexes. */
used_idx = vq->last_used_idx & (vq->size - 1);
@ -873,7 +873,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
count = RTE_MIN(count, MAX_PKT_BURST);
count = RTE_MIN(count, free_entries);
LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
dev->device_fh, count);
dev->vid, count);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < count; i++) {

View File

@ -58,7 +58,7 @@ static void vserver_message_handler(int fd, void *dat, int *remove);
struct connfd_ctx {
struct vhost_server *vserver;
int fh;
int vid;
};
#define MAX_VHOST_SERVER 1024
@ -285,7 +285,7 @@ vserver_new_vq_conn(int fd, void *dat, __rte_unused int *remove)
struct vhost_server *vserver = (struct vhost_server *)dat;
int conn_fd;
struct connfd_ctx *ctx;
int fh;
int vid;
struct vhost_device_ctx vdev_ctx = { (pid_t)0, 0 };
unsigned int size;
@ -301,22 +301,22 @@ vserver_new_vq_conn(int fd, void *dat, __rte_unused int *remove)
return;
}
fh = vhost_new_device(vdev_ctx);
if (fh == -1) {
vid = vhost_new_device(vdev_ctx);
if (vid == -1) {
free(ctx);
close(conn_fd);
return;
}
vdev_ctx.fh = fh;
vdev_ctx.vid = vid;
size = strnlen(vserver->path, PATH_MAX);
vhost_set_ifname(vdev_ctx, vserver->path,
size);
RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", fh);
RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);
ctx->vserver = vserver;
ctx->fh = fh;
ctx->vid = vid;
fdset_add(&g_vhost_server.fdset,
conn_fd, vserver_message_handler, NULL, ctx);
}
@ -331,7 +331,7 @@ vserver_message_handler(int connfd, void *dat, int *remove)
uint64_t features;
int ret;
ctx.fh = cfd_ctx->fh;
ctx.vid = cfd_ctx->vid;
ret = read_vhost_message(connfd, &msg);
if (ret <= 0 || msg.request >= VHOST_USER_MAX) {
if (ret < 0)

View File

@ -133,7 +133,7 @@ user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
if (dev->mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory for dev->mem\n",
dev->device_fh);
dev->vid);
return -1;
}
dev->mem->nregions = memory.nregions;

View File

@ -112,11 +112,11 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
struct virtio_net *
get_device(struct vhost_device_ctx ctx)
{
struct virtio_net *dev = vhost_devices[ctx.fh];
struct virtio_net *dev = vhost_devices[ctx.vid];
if (unlikely(!dev)) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) device not found.\n", ctx.fh);
"(%d) device not found.\n", ctx.vid);
}
return dev;
@ -233,7 +233,7 @@ alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
/*
* Reset some variables in device structure, while keeping few
* others untouched, such as device_fh, ifname, virt_qp_nb: they
* others untouched, such as vid, ifname, virt_qp_nb: they
* should be same unless the device is removed.
*/
static void
@ -263,7 +263,7 @@ vhost_new_device(struct vhost_device_ctx ctx)
dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
if (dev == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory for dev.\n", ctx.fh);
"(%d) failed to allocate memory for dev.\n", ctx.vid);
return -1;
}
@ -278,7 +278,7 @@ vhost_new_device(struct vhost_device_ctx ctx)
}
vhost_devices[i] = dev;
dev->device_fh = i;
dev->vid = i;
return i;
}
@ -303,7 +303,7 @@ vhost_destroy_device(struct vhost_device_ctx ctx)
cleanup_device(dev, 1);
free_device(dev);
vhost_devices[ctx.fh] = NULL;
vhost_devices[ctx.vid] = NULL;
}
void
@ -408,7 +408,7 @@ vhost_set_features(struct vhost_device_ctx ctx, uint64_t *pu)
}
LOG_DEBUG(VHOST_CONFIG,
"(%d) mergeable RX buffers %s, virtio 1 %s\n",
dev->device_fh,
dev->vid,
(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
@ -513,7 +513,7 @@ numa_realloc(struct virtio_net *dev, int index)
out:
dev->virtqueue[index] = vq;
dev->virtqueue[index + 1] = vq + 1;
vhost_devices[dev->device_fh] = dev;
vhost_devices[dev->vid] = dev;
return dev;
}
@ -549,7 +549,7 @@ vhost_set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
if (vq->desc == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find desc ring address.\n",
dev->device_fh);
dev->vid);
return -1;
}
@ -561,7 +561,7 @@ vhost_set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
if (vq->avail == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find avail ring address.\n",
dev->device_fh);
dev->vid);
return -1;
}
@ -570,20 +570,20 @@ vhost_set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
if (vq->used == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find used ring address.\n",
dev->device_fh);
dev->vid);
return -1;
}
vq->log_guest_addr = addr->log_guest_addr;
LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
dev->device_fh, vq->desc);
dev->vid, vq->desc);
LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
dev->device_fh, vq->avail);
dev->vid, vq->avail);
LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
dev->device_fh, vq->used);
dev->vid, vq->used);
LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
dev->device_fh, vq->log_guest_addr);
dev->vid, vq->log_guest_addr);
return 0;
}