vdpa/ifc: support dynamic enable/disable queue
Support dynamically enabling and disabling queues. From the front end, e.g. QEMU, the user can reconfigure the queues with ethtool; for example, "ethtool -L eth0 combined 3" enables 3 queue pairs.

Signed-off-by: Huang Wei <wei.huang@intel.com>
Signed-off-by: Andy Pei <andy.pei@intel.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
parent 83c7370aca
commit a9a56423aa
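
Note on the control flow (background, not part of the patch): when the guest runs "ethtool -L", QEMU issues a vhost-user VHOST_USER_SET_VRING_ENABLE message per queue, and the rte_vhost library dispatches each one to the vDPA driver's set_vring_state callback, which this patch reworks. A minimal sketch of how that callback is registered, with the other ops elided:

    /* Sketch only: the ifc driver's rte_vdpa_dev_ops table, abbreviated;
     * the real initializer registers many more callbacks than shown. */
    static struct rte_vdpa_dev_ops ifcvf_ops = {
        /* ... get_queue_num, get_features, dev_conf, dev_close, ... */
        .set_vring_state = ifcvf_set_vring_state,
    };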
drivers/vdpa/ifc/base/ifcvf.c:

@@ -233,6 +233,106 @@ ifcvf_enable_mq(struct ifcvf_hw *hw)
     }
 }
 
+int
+ifcvf_enable_vring_hw(struct ifcvf_hw *hw, int i)
+{
+    struct ifcvf_pci_common_cfg *cfg;
+    u8 *lm_cfg;
+    u16 notify_off;
+    int msix_vector;
+
+    if (i >= (int)hw->nr_vring)
+        return -1;
+
+    cfg = hw->common_cfg;
+    if (!cfg) {
+        RTE_LOG(ERR, PMD, "common_cfg in HW is NULL.\n");
+        return -1;
+    }
+
+    ifcvf_enable_mq(hw);
+
+    IFCVF_WRITE_REG16(i, &cfg->queue_select);
+    msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector);
+    if (msix_vector != (i + 1)) {
+        IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
+        msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector);
+        if (msix_vector == IFCVF_MSI_NO_VECTOR) {
+            RTE_LOG(ERR, PMD, "queue %d, msix vec alloc failed\n",
+                i);
+            return -1;
+        }
+    }
+
+    io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+            &cfg->queue_desc_hi);
+    io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+            &cfg->queue_avail_hi);
+    io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+            &cfg->queue_used_hi);
+    IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size);
+
+    lm_cfg = hw->lm_cfg;
+    if (lm_cfg) {
+        if (hw->device_type == IFCVF_BLK)
+            *(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+                i * IFCVF_LM_CFG_SIZE) =
+                (u32)hw->vring[i].last_avail_idx |
+                ((u32)hw->vring[i].last_used_idx << 16);
+        else
+            *(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+                (i / 2) * IFCVF_LM_CFG_SIZE +
+                (i % 2) * 4) =
+                (u32)hw->vring[i].last_avail_idx |
+                ((u32)hw->vring[i].last_used_idx << 16);
+    }
+
+    notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off);
+    hw->notify_addr[i] = (void *)((u8 *)hw->notify_base +
+            notify_off * hw->notify_off_multiplier);
+    IFCVF_WRITE_REG16(1, &cfg->queue_enable);
+
+    return 0;
+}
+
+void
+ifcvf_disable_vring_hw(struct ifcvf_hw *hw, int i)
+{
+    struct ifcvf_pci_common_cfg *cfg;
+    u32 ring_state;
+    u8 *lm_cfg;
+
+    if (i >= (int)hw->nr_vring)
+        return;
+
+    cfg = hw->common_cfg;
+    if (!cfg) {
+        RTE_LOG(ERR, PMD, "common_cfg in HW is NULL.\n");
+        return;
+    }
+
+    IFCVF_WRITE_REG16(i, &cfg->queue_select);
+    IFCVF_WRITE_REG16(0, &cfg->queue_enable);
+
+    lm_cfg = hw->lm_cfg;
+    if (lm_cfg) {
+        if (hw->device_type == IFCVF_BLK) {
+            ring_state = *(u32 *)(lm_cfg +
+                IFCVF_LM_RING_STATE_OFFSET +
+                i * IFCVF_LM_CFG_SIZE);
+            hw->vring[i].last_avail_idx =
+                (u16)(ring_state & IFCVF_16_BIT_MASK);
+        } else {
+            ring_state = *(u32 *)(lm_cfg +
+                IFCVF_LM_RING_STATE_OFFSET +
+                (i / 2) * IFCVF_LM_CFG_SIZE +
+                (i % 2) * 4);
+            hw->vring[i].last_avail_idx = (u16)(ring_state >> 16);
+        }
+        hw->vring[i].last_used_idx = (u16)(ring_state >> 16);
+    }
+}
+
 STATIC int
 ifcvf_hw_enable(struct ifcvf_hw *hw)
 {
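Aside (restating the layout, not code from the patch): ifcvf_enable_vring_hw packs both ring indexes of a queue into one 32-bit live-migration (LM) state word. The helper names below are illustrative only; note that on the read-back side above, non-BLK devices take last_avail_idx from the high half instead:

    /* Illustrative helpers for the LM ring-state word as written above:
     * low 16 bits = last_avail_idx, high 16 bits = last_used_idx. */
    static inline u32
    ifcvf_pack_ring_state(u16 last_avail_idx, u16 last_used_idx)
    {
        return (u32)last_avail_idx | ((u32)last_used_idx << 16);
    }

    static inline void
    ifcvf_unpack_ring_state(u32 ring_state, u16 *last_avail_idx,
            u16 *last_used_idx)
    {
        *last_avail_idx = (u16)(ring_state & 0xffff); /* IFCVF_16_BIT_MASK */
        *last_used_idx = (u16)(ring_state >> 16);
    }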
drivers/vdpa/ifc/base/ifcvf.h:

@@ -163,6 +163,12 @@ ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev);
 u64
 ifcvf_get_features(struct ifcvf_hw *hw);
 
+int
+ifcvf_enable_vring_hw(struct ifcvf_hw *hw, int i);
+
+void
+ifcvf_disable_vring_hw(struct ifcvf_hw *hw, int i);
+
 int
 ifcvf_start_hw(struct ifcvf_hw *hw);
 
drivers/vdpa/ifc/ifcvf_vdpa.c:

@@ -1281,6 +1281,52 @@ ifcvf_get_protocol_features(struct rte_vdpa_device *vdev, uint64_t *features)
     return 0;
 }
 
+static int
+ifcvf_config_vring(struct ifcvf_internal *internal, int vring)
+{
+    struct ifcvf_hw *hw = &internal->hw;
+    int vid = internal->vid;
+    struct rte_vhost_vring vq;
+    uint64_t gpa;
+
+    if (hw->vring[vring].enable) {
+        rte_vhost_get_vhost_vring(vid, vring, &vq);
+        gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+        if (gpa == 0) {
+            DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
+            return -1;
+        }
+        hw->vring[vring].desc = gpa;
+
+        gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+        if (gpa == 0) {
+            DRV_LOG(ERR, "Fail to get GPA for available ring.");
+            return -1;
+        }
+        hw->vring[vring].avail = gpa;
+
+        gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+        if (gpa == 0) {
+            DRV_LOG(ERR, "Fail to get GPA for used ring.");
+            return -1;
+        }
+        hw->vring[vring].used = gpa;
+
+        hw->vring[vring].size = vq.size;
+        rte_vhost_get_vring_base(vid, vring,
+                &hw->vring[vring].last_avail_idx,
+                &hw->vring[vring].last_used_idx);
+        ifcvf_enable_vring_hw(&internal->hw, vring);
+    } else {
+        ifcvf_disable_vring_hw(&internal->hw, vring);
+        rte_vhost_set_vring_base(vid, vring,
+                hw->vring[vring].last_avail_idx,
+                hw->vring[vring].last_used_idx);
+    }
+
+    return 0;
+}
+
 static int
 ifcvf_set_vring_state(int vid, int vring, int state)
 {
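For reference (library API, not introduced by this patch): the two rte_vhost calls used by ifcvf_config_vring are declared in rte_vhost.h as below. They are what lets the driver seed the hardware with vhost's saved ring base on enable, and push the hardware's final indexes back to vhost on disable so a later re-enable resumes in place:

    /* Declarations from rte_vhost.h (comments added here): read vhost's
     * saved ring base before enabling a queue in hardware ... */
    int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
            uint16_t *last_avail_idx, uint16_t *last_used_idx);
    /* ... and store the final hardware indexes back after disabling one. */
    int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
            uint16_t last_avail_idx, uint16_t last_used_idx);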
@@ -1288,7 +1334,7 @@ ifcvf_set_vring_state(int vid, int vring, int state)
     struct internal_list *list;
     struct ifcvf_internal *internal;
     struct ifcvf_hw *hw;
-    struct ifcvf_pci_common_cfg *cfg;
+    bool enable = !!state;
     int ret = 0;
 
     vdev = rte_vhost_get_vdpa_device(vid);
@@ -1298,6 +1344,9 @@ ifcvf_set_vring_state(int vid, int vring, int state)
         return -1;
     }
 
+    DRV_LOG(INFO, "%s queue %d of vDPA device %s",
+        enable ? "enable" : "disable", vring, vdev->device->name);
+
     internal = list->internal;
     if (vring < 0 || vring >= internal->max_queues * 2) {
         DRV_LOG(ERR, "Vring index %d not correct", vring);
@@ -1305,27 +1354,41 @@ ifcvf_set_vring_state(int vid, int vring, int state)
     }
 
     hw = &internal->hw;
+    hw->vring[vring].enable = enable;
+
     if (!internal->configured)
-        goto exit;
+        return 0;
 
-    cfg = hw->common_cfg;
-    IFCVF_WRITE_REG16(vring, &cfg->queue_select);
-    IFCVF_WRITE_REG16(!!state, &cfg->queue_enable);
+    unset_notify_relay(internal);
 
-    if (!state && hw->vring[vring].enable) {
-        ret = vdpa_disable_vfio_intr(internal);
-        if (ret)
-            return ret;
+    ret = vdpa_enable_vfio_intr(internal, false);
+    if (ret) {
+        DRV_LOG(ERR, "failed to set vfio interrupt of vDPA device %s",
+            vdev->device->name);
+        return ret;
     }
 
-    if (state && !hw->vring[vring].enable) {
-        ret = vdpa_enable_vfio_intr(internal, false);
-        if (ret)
-            return ret;
+    ret = ifcvf_config_vring(internal, vring);
+    if (ret) {
+        DRV_LOG(ERR, "failed to configure queue %d of vDPA device %s",
+            vring, vdev->device->name);
+        return ret;
     }
 
-exit:
-    hw->vring[vring].enable = !!state;
+    ret = setup_notify_relay(internal);
+    if (ret) {
+        DRV_LOG(ERR, "failed to setup notify relay of vDPA device %s",
+            vdev->device->name);
+        return ret;
+    }
+
+    ret = rte_vhost_host_notifier_ctrl(vid, vring, enable);
+    if (ret) {
+        DRV_LOG(ERR, "vDPA device %s queue %d host notifier ctrl fail",
+            vdev->device->name, vring);
+        return ret;
+    }
+
     return 0;
 }
 