examples/vhost_blk: introduce vhost storage sample

A vhost-blk example that supports the inflight feature. It uses the
new APIs introduced in the first patch, so it shows how these
APIs work to support the inflight feature.

Signed-off-by: Jin Yu <jin.yu@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
This commit is contained in:
Jin Yu 2019-11-05 00:36:26 +08:00 committed by Ferruh Yigit
parent c9657ad01f
commit c19beb3f38
12 changed files with 1774 additions and 2 deletions

View File

@ -839,6 +839,8 @@ F: lib/librte_vhost/
F: doc/guides/prog_guide/vhost_lib.rst
F: examples/vhost/
F: doc/guides/sample_app_ug/vhost.rst
F: examples/vhost_blk/
F: doc/guides/sample_app_ug/vhost_blk.rst
F: examples/vhost_crypto/
F: examples/vdpa/
F: doc/guides/sample_app_ug/vdpa.rst

View File

@ -41,6 +41,7 @@ Sample Applications User Guides
packet_ordering
vmdq_dcb_forwarding
vhost
vhost_blk
vhost_crypto
vdpa
ip_pipeline

View File

@ -0,0 +1,63 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2010-2017 Intel Corporation.
Vhost_blk Sample Application
=============================
The vhost_blk sample application implements a simple block device,
which is used as the backend of a QEMU vhost-user-blk device. Users can extend
the existing example to use other types of block devices (e.g. AIO) besides the
memory-based block device. Similar to the vhost-user-net device, the sample
application uses a UNIX domain socket to communicate with QEMU, and the virtio
ring (split or packed format) is processed by the vhost_blk sample application.
The sample application reuses a lot of code from the SPDK (Storage Performance
Development Kit, https://github.com/spdk/spdk) vhost-user-blk target;
users applying the DPDK vhost library in the storage area can take SPDK as a
reference as well.
Testing steps
-------------
This section shows the steps to start a VM that uses the block device as a
fast data path for a critical application.
Compiling the Application
-------------------------
To compile the sample application see :doc:`compiling`.
The application is located in the ``examples`` sub-directory.
You will also need to build DPDK both on the host and inside the guest.
Start the vhost_blk example
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: console
./vhost_blk -m 1024
.. _vhost_blk_app_run_vm:
Start the VM
~~~~~~~~~~~~
.. code-block:: console
qemu-system-x86_64 -machine accel=kvm \
-m $mem -object memory-backend-file,id=mem,size=$mem,\
mem-path=/dev/hugepages,share=on -numa node,memdev=mem \
-drive file=os.img,if=none,id=disk \
-device ide-hd,drive=disk,bootindex=0 \
-chardev socket,id=char0,reconnect=1,path=/tmp/vhost.socket \
-device vhost-user-blk-pci,ring_packed=1,chardev=char0,num-queues=1 \
...
.. note::
You must check whether your QEMU supports "vhost-user-blk" or not;
QEMU v4.0 or a newer version is required.
reconnect=1 enables live recovery support: QEMU can reconnect to the
vhost_blk example after the vhost_blk example is restarted.
ring_packed=1 means the device supports the packed ring format, but it
requires a guest kernel version >= 5.0.

View File

@ -68,7 +68,7 @@ ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += tep_termination
endif
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += timer
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost vdpa
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost vdpa vhost_blk
ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost_crypto
endif

View File

@ -42,7 +42,7 @@ all_examples = [
'skeleton', 'tep_termination',
'timer', 'vdpa',
'vhost', 'vhost_crypto',
'vm_power_manager',
'vhost_blk', 'vm_power_manager',
'vm_power_manager/guest_cli',
'vmdq', 'vmdq_dcb',
]

View File

@ -0,0 +1,68 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2014 Intel Corporation

# binary name
APP = vhost-blk

# all source are stored in SRCS-y
SRCS-y := blk.c vhost_blk.c vhost_blk_compat.c

# Build using pkg-config variables if possible
# Run pkg-config only for its exit status; reading it via .SHELLSTATUS
# requires GNU Make >= 4.2, otherwise the legacy branch below is taken.
$(shell pkg-config --exists libdpdk)
ifeq ($(.SHELLSTATUS),0)

all: shared
.PHONY: shared static
# Each mode builds its own binary and points build/$(APP) at it.
shared: build/$(APP)-shared
	ln -sf $(APP)-shared build/$(APP)
static: build/$(APP)-static
	ln -sf $(APP)-static build/$(APP)

# The vhost-user backend uses pthreads.
LDFLAGS += -pthread

PC_FILE := $(shell pkg-config --path libdpdk)
CFLAGS += -O3 $(shell pkg-config --cflags libdpdk)
LDFLAGS_SHARED = $(shell pkg-config --libs libdpdk)
LDFLAGS_STATIC = -Wl,-Bstatic $(shell pkg-config --static --libs libdpdk)

# Required for the experimental rte_vhost inflight APIs used by this example.
CFLAGS += -DALLOW_EXPERIMENTAL_API

# Relink when sources, this Makefile, or the libdpdk .pc file change;
# 'build' is an order-only prerequisite (directory mtime must not retrigger).
build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
	$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)

build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build
	$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC)

build:
	@mkdir -p $@

.PHONY: clean
clean:
	rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared
	test -d build && rmdir -p build || true

else # Build using legacy build system

ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif

# Default target, detect a build directory, by looking for a path with a .config
RTE_TARGET ?= $(notdir $(abspath $(dir $(firstword $(wildcard $(RTE_SDK)/*/.config)))))

include $(RTE_SDK)/mk/rte.vars.mk

ifneq ($(CONFIG_RTE_EXEC_ENV_LINUX),y)
$(info This application can only operate in a linux environment, \
please change the definition of the RTE_TARGET environment variable)
all:
else

CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O2 -D_FILE_OFFSET_BITS=64
CFLAGS += $(WERROR_FLAGS)

include $(RTE_SDK)/mk/rte.extapp.mk
endif
endif

126
examples/vhost_blk/blk.c Normal file
View File

@ -0,0 +1,126 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2019 Intel Corporation
*/
/**
* This work is largely based on the "vhost-user-blk" implementation by
* SPDK(https://github.com/spdk/spdk).
*/
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <assert.h>
#include <ctype.h>
#include <string.h>
#include <stddef.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_byteorder.h>
#include <rte_string_fns.h>
#include "vhost_blk.h"
#include "blk_spec.h"
/* Copy @src into the fixed-size field @dst of @size bytes.  If the string
 * is shorter than the field, the remainder is filled with @pad; if it is
 * longer (or equal), it is truncated and no NUL terminator is written.
 */
static void
vhost_strcpy_pad(void *dst, const char *src, size_t size, int pad)
{
	size_t copy_len = strlen(src);

	if (copy_len >= size) {
		/* Source fills or overflows the field: truncate, no padding. */
		memcpy(dst, src, size);
		return;
	}

	memcpy(dst, src, copy_len);
	memset((char *)dst + copy_len, pad, size - copy_len);
}
/* Move the task's scatter-gather payload between guest iovecs and the
 * memory-backed device, starting at 512-byte sector @lba_512.  Direction
 * is taken from task->dxfer_dir; @xfer_len is currently unused.  Returns
 * the number of bytes transferred.
 */
static int
vhost_bdev_blk_readwrite(struct vhost_block_dev *bdev,
			 struct vhost_blk_task *task,
			 uint64_t lba_512, __rte_unused uint32_t xfer_len)
{
	uint64_t pos = lba_512 * 512;
	uint32_t total = 0;
	uint32_t idx;

	for (idx = 0; idx < task->iovs_cnt; idx++) {
		void *buf = task->iovs[idx].iov_base;
		size_t seg = task->iovs[idx].iov_len;

		if (task->dxfer_dir == BLK_DIR_TO_DEV)
			/* Guest write: iovec -> backing memory. */
			memcpy(bdev->data + pos, buf, seg);
		else
			/* Guest read: backing memory -> iovec. */
			memcpy(buf, bdev->data + pos, seg);

		pos += seg;
		total += seg;
	}

	return total;
}
/* Execute one parsed virtio-blk request against @bdev and return a
 * VIRTIO_BLK_S_* status code for the guest-visible status byte.
 * Handles reads (T_IN), writes (T_OUT) and device-ID queries (T_GET_ID);
 * anything else is reported as unsupported.
 */
int
vhost_bdev_process_blk_commands(struct vhost_block_dev *bdev,
			struct vhost_blk_task *task)
{
	int used_len;

	/* Reject transfers that would run past the end of the backing store. */
	if (unlikely(task->data_len > (bdev->blockcnt * bdev->blocklen))) {
		fprintf(stderr, "read or write beyond capacity\n");
		return VIRTIO_BLK_S_UNSUPP;
	}

	switch (task->req->type) {
	case VIRTIO_BLK_T_IN:
		/* Guest read: payload must be a non-zero multiple of 512B. */
		if (unlikely(task->data_len == 0 ||
			(task->data_len & (512 - 1)) != 0)) {
			fprintf(stderr,
				"%s - passed IO buffer is not multiple of 512b"
				"(req_idx = %"PRIu16").\n",
				task->req->type ? "WRITE" : "READ",
				task->head_idx);
			return VIRTIO_BLK_S_UNSUPP;
		}

		task->dxfer_dir = BLK_DIR_FROM_DEV;
		vhost_bdev_blk_readwrite(bdev, task,
					 task->req->sector, task->data_len);
		break;
	case VIRTIO_BLK_T_OUT:
		/* Guest write: same 512B-multiple constraint as a read. */
		if (unlikely(task->data_len == 0 ||
			(task->data_len & (512 - 1)) != 0)) {
			fprintf(stderr,
				"%s - passed IO buffer is not multiple of 512b"
				"(req_idx = %"PRIu16").\n",
				task->req->type ? "WRITE" : "READ",
				task->head_idx);
			return VIRTIO_BLK_S_UNSUPP;
		}

		/* A chain parsed as a read must not carry a write request. */
		if (task->readtype) {
			fprintf(stderr, "type isn't right\n");
			return VIRTIO_BLK_S_IOERR;
		}
		task->dxfer_dir = BLK_DIR_TO_DEV;
		vhost_bdev_blk_readwrite(bdev, task,
					 task->req->sector, task->data_len);
		break;
	case VIRTIO_BLK_T_GET_ID:
		/* The guest must supply a buffer to receive the ID string.
		 * Fix: the original condition ("|| task->data_len") rejected
		 * any request with a non-zero data_len, so the surviving
		 * min(VIRTIO_BLK_ID_BYTES, 0) made used_len always 0 and the
		 * ID was never copied out.  Require a non-empty buffer
		 * instead (this mirrors the SPDK vhost-user-blk code this
		 * example is derived from).
		 */
		if (!task->iovs_cnt || !task->data_len)
			return VIRTIO_BLK_S_UNSUPP;
		used_len = min(VIRTIO_BLK_ID_BYTES, task->data_len);
		vhost_strcpy_pad(task->iovs[0].iov_base,
				 bdev->product_name, used_len, ' ');
		break;
	default:
		fprintf(stderr, "unsupported cmd\n");
		return VIRTIO_BLK_S_UNSUPP;
	}

	return VIRTIO_BLK_S_OK;
}

View File

@ -0,0 +1,95 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

/* Wire-level constants and message layouts for the vhost-user protocol and
 * the virtio-blk request format, as consumed by the vhost_blk example.
 */

#ifndef _BLK_SPEC_H
#define _BLK_SPEC_H

#include <stdint.h>

/* Fallbacks so this header also builds against vhost headers that predate
 * these protocol constants.
 */
#ifndef VHOST_USER_MEMORY_MAX_NREGIONS
#define VHOST_USER_MEMORY_MAX_NREGIONS		8
#endif

#ifndef VHOST_USER_MAX_CONFIG_SIZE
#define VHOST_USER_MAX_CONFIG_SIZE		256
#endif

#ifndef VHOST_USER_PROTOCOL_F_CONFIG
#define VHOST_USER_PROTOCOL_F_CONFIG		9
#endif

#ifndef VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD
#define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD	12
#endif

#define VIRTIO_BLK_ID_BYTES	20	/* ID string length */

/* virtio-blk request types (virtio_blk_outhdr.type). */
#define VIRTIO_BLK_T_IN			0
#define VIRTIO_BLK_T_OUT		1
#define VIRTIO_BLK_T_FLUSH		4
#define VIRTIO_BLK_T_GET_ID		8
#define VIRTIO_BLK_T_DISCARD		11
#define VIRTIO_BLK_T_WRITE_ZEROES	13

/* virtio-blk request status codes written back to the guest. */
#define VIRTIO_BLK_S_OK		0
#define VIRTIO_BLK_S_IOERR	1
#define VIRTIO_BLK_S_UNSUPP	2

/* vhost-user message request types, per the vhost-user protocol spec. */
enum vhost_user_request {
	VHOST_USER_NONE = 0,
	VHOST_USER_GET_FEATURES = 1,
	VHOST_USER_SET_FEATURES = 2,
	VHOST_USER_SET_OWNER = 3,
	VHOST_USER_RESET_OWNER = 4,
	VHOST_USER_SET_MEM_TABLE = 5,
	VHOST_USER_SET_LOG_BASE = 6,
	VHOST_USER_SET_LOG_FD = 7,
	VHOST_USER_SET_VRING_NUM = 8,
	VHOST_USER_SET_VRING_ADDR = 9,
	VHOST_USER_SET_VRING_BASE = 10,
	VHOST_USER_GET_VRING_BASE = 11,
	VHOST_USER_SET_VRING_KICK = 12,
	VHOST_USER_SET_VRING_CALL = 13,
	VHOST_USER_SET_VRING_ERR = 14,
	VHOST_USER_GET_PROTOCOL_FEATURES = 15,
	VHOST_USER_SET_PROTOCOL_FEATURES = 16,
	VHOST_USER_GET_QUEUE_NUM = 17,
	VHOST_USER_SET_VRING_ENABLE = 18,
	VHOST_USER_MAX
};

/** Get/set config msg payload */
struct vhost_user_config {
	uint32_t offset;
	uint32_t size;
	uint32_t flags;
	uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
};

/** Fixed-size vhost_memory struct */
struct vhost_memory_padded {
	uint32_t nregions;
	uint32_t padding;
	struct vhost_memory_region regions[VHOST_USER_MEMORY_MAX_NREGIONS];
};

/* On-the-wire layout of a vhost-user message: fixed header followed by a
 * request-dependent payload union.  Must stay packed to match the wire
 * format.
 */
struct vhost_user_msg {
	enum vhost_user_request request;

#define VHOST_USER_VERSION_MASK     0x3
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
	uint32_t flags;
	uint32_t size; /**< the following payload size */
	union {
#define VHOST_USER_VRING_IDX_MASK   0xff
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
		uint64_t u64;
		struct vhost_vring_state state;
		struct vhost_vring_addr addr;
		struct vhost_memory_padded memory;
		struct vhost_user_config cfg;
	} payload;
} __attribute((packed));	/* NOTE(review): "__attribute" (no trailing
				 * underscores) is a GCC-accepted alias for
				 * "__attribute__" — confirm for other
				 * compilers.
				 */

#endif

View File

@ -0,0 +1,21 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation

# meson file, for building this example as part of a main DPDK build.
#
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'

# vhost-user is Linux-only; skip the example elsewhere.
if not is_linux
	build = false
endif

# linux/virtio_blk.h supplies struct virtio_blk_config and the request types.
if not cc.has_header('linux/virtio_blk.h')
	build = false
endif

deps += 'vhost'
# The rte_vhost inflight APIs used by this example are still experimental.
allow_experimental_apis = true
sources = files(
	'blk.c', 'vhost_blk.c', 'vhost_blk_compat.c'
)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,127 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

/* Shared types and declarations for the vhost_blk example application. */

#ifndef _VHOST_BLK_H_
#define _VHOST_BLK_H_

#include <stdio.h>
#include <sys/uio.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_blk.h>
#include <linux/virtio_ring.h>

#include <rte_vhost.h>

/* Fallback for kernel headers that predate the packed virtqueue format. */
#ifndef VIRTIO_F_RING_PACKED
#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {
	/* Buffer Address. */
	__le64 addr;
	/* Buffer Length. */
	__le32 len;
	/* Buffer ID. */
	__le16 id;
	/* The flags depending on descriptor type. */
	__le16 flags;
};
#endif

/* Per-virtqueue state: the vhost vring, its inflight shadow ring, and the
 * position/wrap-counter bookkeeping used for packed-ring processing.
 */
struct vhost_blk_queue {
	struct rte_vhost_vring vq;
	struct rte_vhost_ring_inflight inflight_vq;
	uint16_t last_avail_idx;
	uint16_t last_used_idx;
	bool avail_wrap_counter;
	bool used_wrap_counter;
};

#define NUM_OF_BLK_QUEUES 1

#define min(a, b) (((a) < (b)) ? (a) : (b))

/* A memory-backed block device exposed to the guest. */
struct vhost_block_dev {
	/** ID for vhost library. */
	int vid;
	/** Queues for the block device */
	struct vhost_blk_queue queues[NUM_OF_BLK_QUEUES];
	/** Unique name for this block device. */
	char name[64];
	/** Unique product name for this kind of block device. */
	char product_name[256];
	/** Size in bytes of a logical block for the backend */
	uint32_t blocklen;
	/** Number of blocks */
	uint64_t blockcnt;
	/** write cache enabled, not used at the moment */
	int write_cache;
	/** use memory as disk storage space */
	uint8_t *data;
};

/* Controller state for one vhost-user-blk socket of this example. */
struct vhost_blk_ctrlr {
	uint8_t started;
	uint8_t packed_ring;
	uint8_t need_restart;
	/** Only support 1 LUN for the example */
	struct vhost_block_dev *bdev;
	/** VM memory region */
	struct rte_vhost_memory *mem;
} __rte_cache_aligned;

#define VHOST_BLK_MAX_IOVS 128

/* Direction of the payload transfer relative to the backend device. */
enum blk_data_dir {
	BLK_DIR_NONE = 0,
	BLK_DIR_TO_DEV = 1,	/* guest write: iovecs -> device */
	BLK_DIR_FROM_DEV = 2,	/* guest read: device -> iovecs */
};

/* One virtio-blk request parsed out of a descriptor chain. */
struct vhost_blk_task {
	uint8_t readtype;	/* NOTE(review): appears to mark a chain
				 * parsed as a read — verify in vhost_blk.c */
	uint8_t req_idx;
	uint16_t head_idx;	/* head descriptor index of the chain */
	uint16_t last_idx;
	uint16_t inflight_idx;
	uint16_t buffer_id;
	uint32_t dxfer_dir;	/* one of enum blk_data_dir */
	uint32_t data_len;	/* total payload bytes across iovs */
	struct virtio_blk_outhdr *req;
	volatile uint8_t *status;	/* presumably the guest-visible status
					 * byte — confirm against vhost_blk.c */
	struct iovec iovs[VHOST_BLK_MAX_IOVS];
	uint32_t iovs_cnt;
	struct vring_packed_desc *desc_packed;
	struct vring_desc *desc_split;
	struct rte_vhost_vring *vq;
	struct vhost_block_dev *bdev;
	struct vhost_blk_ctrlr *ctrlr;
};

/* Task wrapper carrying the matching packed-ring inflight bookkeeping. */
struct inflight_blk_task {
	struct vhost_blk_task blk_task;
	struct rte_vhost_inflight_desc_packed *inflight_desc;
	struct rte_vhost_inflight_info_packed *inflight_packed;
};

/* NOTE(review): tentative definitions in a header — every translation unit
 * including this file defines these objects, which relies on common-symbol
 * merging (-fcommon).  Consider 'extern' here plus a single definition in a
 * .c file — TODO confirm against vhost_blk.c (not visible in this chunk).
 */
struct vhost_blk_ctrlr *g_vhost_ctrlr;
struct vhost_device_ops vhost_blk_device_ops;

/* Execute one parsed virtio-blk request; returns a VIRTIO_BLK_S_* status. */
int vhost_bdev_process_blk_commands(struct vhost_block_dev *bdev,
	struct vhost_blk_task *task);

/* Register the compat pre/post vhost-user message hooks for a session. */
void vhost_session_install_rte_compat_hooks(uint32_t vid);

/* Advertise the CONFIG and INFLIGHT_SHMFD protocol features on a socket. */
void vhost_dev_install_rte_compat_hooks(const char *path);

/* Look up the controller bound to the given socket path. */
struct vhost_blk_ctrlr *vhost_blk_ctrlr_find(const char *ctrlr_name);

#endif /* _VHOST_BLK_H_ */

View File

@ -0,0 +1,174 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2019 Intel Corporation
*/
#ifndef _VHOST_BLK_COMPAT_H_
#define _VHOST_BLK_COMPAT_H_
#include <sys/uio.h>
#include <stdint.h>
#include <linux/virtio_blk.h>
#include <linux/virtio_ring.h>
#include <rte_vhost.h>
#include "vhost_blk.h"
#include "blk_spec.h"
#define VHOST_MAX_VQUEUES 256
#define SPDK_VHOST_MAX_VQ_SIZE 1024
#define VHOST_USER_GET_CONFIG 24
#define VHOST_USER_SET_CONFIG 25
/* Fill @config (at most @len bytes) with a struct virtio_blk_config
 * describing @bdev, for answering a VHOST_USER_GET_CONFIG message.
 * Always returns 0; a NULL @bdev yields a zero-sized device (see below).
 */
static int
vhost_blk_get_config(struct vhost_block_dev *bdev, uint8_t *config,
		uint32_t len)
{
	struct virtio_blk_config blkcfg;
	uint32_t blk_size;
	uint64_t blkcnt;

	if (bdev == NULL) {
		/* We can't just return -1 here as this GET_CONFIG message might
		 * be caused by a QEMU VM reboot. Returning -1 will indicate an
		 * error to QEMU, who might then decide to terminate itself.
		 * We don't want that. A simple reboot shouldn't break the
		 * system.
		 *
		 * Presenting a block device with block size 0 and block count 0
		 * doesn't cause any problems on QEMU side and the virtio-pci
		 * device is even still available inside the VM, but there will
		 * be no block device created for it - the kernel drivers will
		 * silently reject it.
		 */
		blk_size = 0;
		blkcnt = 0;
	} else {
		blk_size = bdev->blocklen;
		blkcnt = bdev->blockcnt;
	}

	memset(&blkcfg, 0, sizeof(blkcfg));
	blkcfg.blk_size = blk_size;
	/* minimum I/O size in blocks */
	blkcfg.min_io_size = 1;
	/* expressed in 512 Bytes sectors */
	blkcfg.capacity = (blkcnt * blk_size) / 512;
	/* QEMU can overwrite this value when started */
	blkcfg.num_queues = VHOST_MAX_VQUEUES;

	fprintf(stdout, "block device:blk_size = %d, blkcnt = %"PRIx64"\n",
		blk_size, blkcnt);

	/* Copy no more than the guest asked for and no more than we have. */
	memcpy(config, &blkcfg, min(len, sizeof(blkcfg)));

	return 0;
}
static enum rte_vhost_msg_result
extern_vhost_pre_msg_handler(int vid, void *_msg)
{
char path[PATH_MAX];
struct vhost_blk_ctrlr *ctrlr;
struct vhost_user_msg *msg = _msg;
int ret;
ret = rte_vhost_get_ifname(vid, path, PATH_MAX);
if (ret) {
fprintf(stderr, "Cannot get socket name\n");
return -1;
}
ctrlr = vhost_blk_ctrlr_find(path);
if (!ctrlr) {
fprintf(stderr, "Controller is not ready\n");
return -1;
}
switch ((int)msg->request) {
case VHOST_USER_GET_VRING_BASE:
case VHOST_USER_SET_VRING_BASE:
case VHOST_USER_SET_VRING_ADDR:
case VHOST_USER_SET_VRING_NUM:
case VHOST_USER_SET_VRING_KICK:
case VHOST_USER_SET_VRING_CALL:
case VHOST_USER_SET_MEM_TABLE:
break;
case VHOST_USER_GET_CONFIG: {
int rc = 0;
rc = vhost_blk_get_config(ctrlr->bdev,
msg->payload.cfg.region,
msg->payload.cfg.size);
if (rc != 0)
msg->size = 0;
return RTE_VHOST_MSG_RESULT_REPLY;
}
case VHOST_USER_SET_CONFIG:
default:
break;
}
return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
}
static enum rte_vhost_msg_result
extern_vhost_post_msg_handler(int vid, void *_msg)
{
char path[PATH_MAX];
struct vhost_blk_ctrlr *ctrlr;
struct vhost_user_msg *msg = _msg;
int ret;
ret = rte_vhost_get_ifname(vid, path, PATH_MAX);
if (ret) {
fprintf(stderr, "Cannot get socket name\n");
return -1;
}
ctrlr = vhost_blk_ctrlr_find(path);
if (!ctrlr) {
fprintf(stderr, "Controller is not ready\n");
return -1;
}
switch (msg->request) {
case VHOST_USER_SET_FEATURES:
case VHOST_USER_SET_VRING_KICK:
default:
break;
}
return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
}
/* Extern-ops table registered per session: the pre handler intercepts
 * GET_CONFIG before the vhost library, the post handler runs afterwards.
 */
struct rte_vhost_user_extern_ops g_extern_vhost_ops = {
	.pre_msg_handle = extern_vhost_pre_msg_handler,
	.post_msg_handle = extern_vhost_post_msg_handler,
};
/* Register the extern pre/post message handlers for session @vid.
 * Registration failure is reported on stderr but not fatal.
 */
void
vhost_session_install_rte_compat_hooks(uint32_t vid)
{
	int ret = rte_vhost_extern_callback_register(vid, &g_extern_vhost_ops,
						     NULL);

	if (ret != 0)
		fprintf(stderr,
			"rte_vhost_extern_callback_register() failed for vid = %d\n",
			vid);
}
/* Advertise the CONFIG and INFLIGHT_SHMFD vhost-user protocol features on
 * the socket at @path, preserving whatever features are already set.
 */
void
vhost_dev_install_rte_compat_hooks(const char *path)
{
	uint64_t features = 0;

	rte_vhost_driver_get_protocol_features(path, &features);
	features |= (1ULL << VHOST_USER_PROTOCOL_F_CONFIG) |
		    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD);
	rte_vhost_driver_set_protocol_features(path, features);
}
#endif