diff --git a/include/spdk/bdev.h b/include/spdk/bdev.h
index a83ab3808f..d3c88bfeb9 100644
--- a/include/spdk/bdev.h
+++ b/include/spdk/bdev.h
@@ -45,8 +45,8 @@
 #include "spdk/queue.h"
 #include "spdk/scsi_spec.h"
 
-#define SPDK_BDEV_SMALL_RBUF_MAX_SIZE 8192
-#define SPDK_BDEV_LARGE_RBUF_MAX_SIZE (64 * 1024)
+#define SPDK_BDEV_SMALL_BUF_MAX_SIZE 8192
+#define SPDK_BDEV_LARGE_BUF_MAX_SIZE (64 * 1024)
 
 #define SPDK_BDEV_MAX_NAME_LENGTH 16
 #define SPDK_BDEV_MAX_PRODUCT_NAME_LENGTH 50
diff --git a/include/spdk_internal/bdev.h b/include/spdk_internal/bdev.h
index 03edcf7089..9de340454f 100644
--- a/include/spdk_internal/bdev.h
+++ b/include/spdk_internal/bdev.h
@@ -145,7 +145,7 @@
 	int (*dump_config_json)(void *ctx, struct spdk_json_write_ctx *w);
 };
 
-typedef void (*spdk_bdev_io_get_rbuf_cb)(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);
+typedef void (*spdk_bdev_io_get_buf_cb)(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);
 
 struct spdk_bdev_io {
 	/** The block device that this I/O belongs to. */
@@ -163,7 +163,7 @@
 
 	union {
 		struct {
-			/** The unaligned rbuf originally allocated. */
+			/** The unaligned buf originally allocated. */
 			void *buf_unaligned;
 
 			/** For basic read case, use our own iovec element. */
@@ -181,8 +181,8 @@
 			/** Starting offset (in bytes) of the blockdev for this I/O. */
 			uint64_t offset;
 
-			/** Indicate whether the blockdev layer to put rbuf or not. */
-			bool put_rbuf;
+			/** Indicate whether the blockdev layer to put buf or not. */
+			bool put_buf;
 		} read;
 		struct {
 			/** For basic write case, use our own iovec element */
@@ -247,8 +247,8 @@
 	/** Context that will be passed to the completion callback */
 	void *caller_ctx;
 
-	/** Callback for when rbuf is allocated */
-	spdk_bdev_io_get_rbuf_cb get_rbuf_cb;
+	/** Callback for when buf is allocated */
+	spdk_bdev_io_get_buf_cb get_buf_cb;
 
 	/** Status for the IO */
 	enum spdk_bdev_io_status status;
@@ -271,7 +271,7 @@
 	TAILQ_ENTRY(spdk_bdev_io) link;
 
 	/** Entry to the list need_buf of struct spdk_bdev. */
-	TAILQ_ENTRY(spdk_bdev_io) rbuf_link;
+	TAILQ_ENTRY(spdk_bdev_io) buf_link;
 
 	/** Per I/O context for use by the blockdev module */
 	uint8_t driver_ctx[0];
@@ -280,7 +280,7 @@
 };
 
 void spdk_bdev_register(struct spdk_bdev *bdev);
-void spdk_bdev_io_get_rbuf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_rbuf_cb cb);
+void spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb);
 struct spdk_bdev_io *spdk_bdev_get_io(void);
 struct spdk_bdev_io *spdk_bdev_get_child_io(struct spdk_bdev_io *parent,
 		struct spdk_bdev *bdev,
diff --git a/lib/bdev/aio/blockdev_aio.c b/lib/bdev/aio/blockdev_aio.c
index a928ea8ffd..596911295e 100644
--- a/lib/bdev/aio/blockdev_aio.c
+++ b/lib/bdev/aio/blockdev_aio.c
@@ -224,7 +224,7 @@ blockdev_aio_reset(struct file_disk *fdisk, struct blockdev_aio_task *aio_task)
 	spdk_bdev_io_complete(spdk_bdev_io_from_ctx(aio_task), SPDK_BDEV_IO_STATUS_SUCCESS);
 }
 
-static void blockdev_aio_get_rbuf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+static void blockdev_aio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
 	blockdev_aio_readv((struct file_disk *)bdev_io->bdev->ctxt,
 			   ch,
@@ -239,7 +239,7 @@ static int _blockdev_aio_submit_request(struct spdk_io_channel *ch, struct spdk_
 {
 	switch (bdev_io->type) {
 	case SPDK_BDEV_IO_TYPE_READ:
-		spdk_bdev_io_get_rbuf(bdev_io, blockdev_aio_get_rbuf_cb);
+		spdk_bdev_io_get_buf(bdev_io, blockdev_aio_get_buf_cb);
 		return 0;
 
 	case SPDK_BDEV_IO_TYPE_WRITE:
diff --git a/lib/bdev/bdev.c b/lib/bdev/bdev.c
index 6e353568bc..a5bb7a178e 100644
--- a/lib/bdev/bdev.c
+++ b/lib/bdev/bdev.c
@@ -51,16 +51,16 @@
 #include "spdk_internal/log.h"
 
 #define SPDK_BDEV_IO_POOL_SIZE (64 * 1024)
-#define RBUF_SMALL_POOL_SIZE 8192
-#define RBUF_LARGE_POOL_SIZE 1024
+#define BUF_SMALL_POOL_SIZE 8192
+#define BUF_LARGE_POOL_SIZE 1024
 
 static struct rte_mempool *spdk_bdev_g_io_pool = NULL;
-static struct rte_mempool *g_rbuf_small_pool = NULL;
-static struct rte_mempool *g_rbuf_large_pool = NULL;
+static struct rte_mempool *g_buf_small_pool = NULL;
+static struct rte_mempool *g_buf_large_pool = NULL;
 
-typedef TAILQ_HEAD(, spdk_bdev_io) need_rbuf_tailq_t;
-static need_rbuf_tailq_t g_need_rbuf_small[RTE_MAX_LCORE];
-static need_rbuf_tailq_t g_need_rbuf_large[RTE_MAX_LCORE];
+typedef TAILQ_HEAD(, spdk_bdev_io) need_buf_tailq_t;
+static need_buf_tailq_t g_need_buf_small[RTE_MAX_LCORE];
+static need_buf_tailq_t g_need_buf_large[RTE_MAX_LCORE];
 
 static TAILQ_HEAD(, spdk_bdev_module_if) spdk_bdev_module_list =
 	TAILQ_HEAD_INITIALIZER(spdk_bdev_module_list);
@@ -116,26 +116,26 @@ struct spdk_bdev *spdk_bdev_get_by_name(const char *bdev_name)
 }
 
 static void
-spdk_bdev_io_set_rbuf(struct spdk_bdev_io *bdev_io, void *buf)
+spdk_bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf)
 {
-	assert(bdev_io->get_rbuf_cb != NULL);
+	assert(bdev_io->get_buf_cb != NULL);
 	assert(buf != NULL);
 	assert(bdev_io->u.read.iovs != NULL);
 
 	bdev_io->u.read.buf_unaligned = buf;
 	bdev_io->u.read.iovs[0].iov_base = (void *)((unsigned long)((char *)buf + 512) & ~511UL);
 	bdev_io->u.read.iovs[0].iov_len = bdev_io->u.read.len;
-	bdev_io->u.read.put_rbuf = true;
-	bdev_io->get_rbuf_cb(bdev_io->ch->channel, bdev_io);
+	bdev_io->u.read.put_buf = true;
+	bdev_io->get_buf_cb(bdev_io->ch->channel, bdev_io);
 }
 
 static void
-spdk_bdev_io_put_rbuf(struct spdk_bdev_io *bdev_io)
+spdk_bdev_io_put_buf(struct spdk_bdev_io *bdev_io)
 {
 	struct rte_mempool *pool;
 	struct spdk_bdev_io *tmp;
 	void *buf;
-	need_rbuf_tailq_t *tailq;
+	need_buf_tailq_t *tailq;
 	uint64_t length;
 
 	assert(bdev_io->u.read.iovcnt == 1);
@@ -143,24 +143,24 @@ spdk_bdev_io_put_rbuf(struct spdk_bdev_io *bdev_io)
 	length = bdev_io->u.read.len;
 	buf = bdev_io->u.read.buf_unaligned;
 
-	if (length <= SPDK_BDEV_SMALL_RBUF_MAX_SIZE) {
-		pool = g_rbuf_small_pool;
-		tailq = &g_need_rbuf_small[rte_lcore_id()];
+	if (length <= SPDK_BDEV_SMALL_BUF_MAX_SIZE) {
+		pool = g_buf_small_pool;
+		tailq = &g_need_buf_small[rte_lcore_id()];
 	} else {
-		pool = g_rbuf_large_pool;
-		tailq = &g_need_rbuf_large[rte_lcore_id()];
+		pool = g_buf_large_pool;
+		tailq = &g_need_buf_large[rte_lcore_id()];
 	}
 
 	if (TAILQ_EMPTY(tailq)) {
 		rte_mempool_put(pool, buf);
 	} else {
 		tmp = TAILQ_FIRST(tailq);
-		TAILQ_REMOVE(tailq, tmp, rbuf_link);
-		spdk_bdev_io_set_rbuf(tmp, buf);
+		TAILQ_REMOVE(tailq, tmp, buf_link);
+		spdk_bdev_io_set_buf(tmp, buf);
 	}
 }
 
-static int spdk_initialize_rbuf_pool(void)
+static int spdk_initialize_buf_pool(void)
 {
 	int cache_size;
 
@@ -169,29 +169,29 @@ static int spdk_initialize_rbuf_pool(void)
 	 * using spdk_event_get_active_core_count() to determine how many local caches we need
 	 * to account for.
 	 */
-	cache_size = RBUF_SMALL_POOL_SIZE / (2 * spdk_env_get_core_count());
+	cache_size = BUF_SMALL_POOL_SIZE / (2 * spdk_env_get_core_count());
 	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE)
 		cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
-	g_rbuf_small_pool = rte_mempool_create("rbuf_small_pool",
-					       RBUF_SMALL_POOL_SIZE,
-					       SPDK_BDEV_SMALL_RBUF_MAX_SIZE + 512,
-					       cache_size, 0, NULL, NULL, NULL, NULL,
-					       SOCKET_ID_ANY, 0);
-	if (!g_rbuf_small_pool) {
-		SPDK_ERRLOG("create rbuf small pool failed\n");
+	g_buf_small_pool = rte_mempool_create("buf_small_pool",
+					      BUF_SMALL_POOL_SIZE,
+					      SPDK_BDEV_SMALL_BUF_MAX_SIZE + 512,
+					      cache_size, 0, NULL, NULL, NULL, NULL,
+					      SOCKET_ID_ANY, 0);
+	if (!g_buf_small_pool) {
+		SPDK_ERRLOG("create buf small pool failed\n");
 		return -1;
 	}
 
-	cache_size = RBUF_LARGE_POOL_SIZE / (2 * spdk_env_get_core_count());
+	cache_size = BUF_LARGE_POOL_SIZE / (2 * spdk_env_get_core_count());
 	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE)
 		cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
-	g_rbuf_large_pool = rte_mempool_create("rbuf_large_pool",
-					       RBUF_LARGE_POOL_SIZE,
-					       SPDK_BDEV_LARGE_RBUF_MAX_SIZE + 512,
-					       cache_size, 0, NULL, NULL, NULL, NULL,
-					       SOCKET_ID_ANY, 0);
-	if (!g_rbuf_large_pool) {
-		SPDK_ERRLOG("create rbuf large pool failed\n");
+	g_buf_large_pool = rte_mempool_create("buf_large_pool",
+					      BUF_LARGE_POOL_SIZE,
+					      SPDK_BDEV_LARGE_BUF_MAX_SIZE + 512,
+					      cache_size, 0, NULL, NULL, NULL, NULL,
+					      SOCKET_ID_ANY, 0);
+	if (!g_buf_large_pool) {
+		SPDK_ERRLOG("create buf large pool failed\n");
 		return -1;
 	}
 
@@ -297,11 +297,11 @@ spdk_bdev_initialize(void)
 	}
 
 	for (i = 0; i < RTE_MAX_LCORE; i++) {
-		TAILQ_INIT(&g_need_rbuf_small[i]);
-		TAILQ_INIT(&g_need_rbuf_large[i]);
+		TAILQ_INIT(&g_need_buf_small[i]);
+		TAILQ_INIT(&g_need_buf_large[i]);
 	}
 
-	return spdk_initialize_rbuf_pool();
+	return spdk_initialize_buf_pool();
 }
 
 /*
@@ -334,8 +334,8 @@ spdk_bdev_finish(void)
 
 	spdk_bdev_module_finish();
 
-	rc += spdk_bdev_check_pool(g_rbuf_small_pool, RBUF_SMALL_POOL_SIZE);
-	rc += spdk_bdev_check_pool(g_rbuf_large_pool, RBUF_LARGE_POOL_SIZE);
+	rc += spdk_bdev_check_pool(g_buf_small_pool, BUF_SMALL_POOL_SIZE);
+	rc += spdk_bdev_check_pool(g_buf_large_pool, BUF_LARGE_POOL_SIZE);
 
 	return (rc != 0);
 }
@@ -363,54 +363,54 @@ spdk_bdev_put_io(struct spdk_bdev_io *bdev_io)
 		return;
 	}
 
-	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ && bdev_io->u.read.put_rbuf) {
-		spdk_bdev_io_put_rbuf(bdev_io);
+	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ && bdev_io->u.read.put_buf) {
+		spdk_bdev_io_put_buf(bdev_io);
 	}
 
 	rte_mempool_put(spdk_bdev_g_io_pool, bdev_io);
 }
 
 static void
-_spdk_bdev_io_get_rbuf(struct spdk_bdev_io *bdev_io)
+_spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io)
 {
 	uint64_t len = bdev_io->u.read.len;
 	struct rte_mempool *pool;
-	need_rbuf_tailq_t *tailq;
+	need_buf_tailq_t *tailq;
 	int rc;
 	void *buf = NULL;
 
-	if (len <= SPDK_BDEV_SMALL_RBUF_MAX_SIZE) {
-		pool = g_rbuf_small_pool;
-		tailq = &g_need_rbuf_small[rte_lcore_id()];
+	if (len <= SPDK_BDEV_SMALL_BUF_MAX_SIZE) {
+		pool = g_buf_small_pool;
+		tailq = &g_need_buf_small[rte_lcore_id()];
 	} else {
-		pool = g_rbuf_large_pool;
-		tailq = &g_need_rbuf_large[rte_lcore_id()];
+		pool = g_buf_large_pool;
+		tailq = &g_need_buf_large[rte_lcore_id()];
 	}
 
 	rc = rte_mempool_get(pool, (void **)&buf);
 	if (rc < 0 || !buf) {
-		TAILQ_INSERT_TAIL(tailq, bdev_io, rbuf_link);
+		TAILQ_INSERT_TAIL(tailq, bdev_io, buf_link);
 	} else {
-		spdk_bdev_io_set_rbuf(bdev_io, buf);
+		spdk_bdev_io_set_buf(bdev_io, buf);
 	}
 }
 
 static void
-spdk_bdev_cleanup_pending_rbuf_io(struct spdk_bdev *bdev)
+spdk_bdev_cleanup_pending_buf_io(struct spdk_bdev *bdev)
 {
 	struct spdk_bdev_io *bdev_io, *tmp;
 
-	TAILQ_FOREACH_SAFE(bdev_io, &g_need_rbuf_small[rte_lcore_id()], rbuf_link, tmp) {
+	TAILQ_FOREACH_SAFE(bdev_io, &g_need_buf_small[rte_lcore_id()], buf_link, tmp) {
 		if (bdev_io->bdev == bdev) {
-			TAILQ_REMOVE(&g_need_rbuf_small[rte_lcore_id()], bdev_io, rbuf_link);
+			TAILQ_REMOVE(&g_need_buf_small[rte_lcore_id()], bdev_io, buf_link);
 			bdev_io->status = SPDK_BDEV_IO_STATUS_FAILED;
 		}
 	}
 
-	TAILQ_FOREACH_SAFE(bdev_io, &g_need_rbuf_large[rte_lcore_id()], rbuf_link, tmp) {
+	TAILQ_FOREACH_SAFE(bdev_io, &g_need_buf_large[rte_lcore_id()], buf_link, tmp) {
 		if (bdev_io->bdev == bdev) {
-			TAILQ_REMOVE(&g_need_rbuf_large[rte_lcore_id()], bdev_io, rbuf_link);
+			TAILQ_REMOVE(&g_need_buf_large[rte_lcore_id()], bdev_io, buf_link);
 			bdev_io->status = SPDK_BDEV_IO_STATUS_FAILED;
 		}
 	}
@@ -424,7 +424,7 @@ __submit_request(struct spdk_bdev *bdev, struct spdk_bdev_io *bdev_io)
 	assert(bdev_io->status == SPDK_BDEV_IO_STATUS_PENDING);
 
 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
-		spdk_bdev_cleanup_pending_rbuf_io(bdev);
+		spdk_bdev_cleanup_pending_buf_io(bdev);
 		ch = NULL;
 	} else {
 		ch = bdev_io->ch->channel;
@@ -496,9 +496,9 @@ spdk_bdev_get_child_io(struct spdk_bdev_io *parent,
 	child->type = parent->type;
 	memcpy(&child->u, &parent->u, sizeof(child->u));
 	if (child->type == SPDK_BDEV_IO_TYPE_READ) {
-		child->u.read.put_rbuf = false;
+		child->u.read.put_buf = false;
 	}
-	child->get_rbuf_cb = NULL;
+	child->get_buf_cb = NULL;
 	child->parent = parent;
 
 	TAILQ_INSERT_TAIL(&parent->child_io, child, link);
@@ -599,7 +599,7 @@ spdk_bdev_read(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
 	bdev_io->u.read.iovcnt = 1;
 	bdev_io->u.read.len = nbytes;
 	bdev_io->u.read.offset = offset;
-	bdev_io->u.read.put_rbuf = false;
+	bdev_io->u.read.put_buf = false;
 	spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
 
 	rc = spdk_bdev_io_submit(bdev_io);
@@ -638,7 +638,7 @@ spdk_bdev_readv(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
 	bdev_io->u.read.iovcnt = iovcnt;
 	bdev_io->u.read.len = nbytes;
 	bdev_io->u.read.offset = offset;
-	bdev_io->u.read.put_rbuf = false;
+	bdev_io->u.read.put_buf = false;
 	spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
 
 	rc = spdk_bdev_io_submit(bdev_io);
@@ -853,7 +853,7 @@ spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
 		TAILQ_REMOVE(&bdev_io->child_io, child_io, link);
 
 		/*
-		 * Child I/O may have an rbuf that needs to be returned to a pool
+		 * Child I/O may have a buf that needs to be returned to a pool
 		 * on a different core, so free it through the request submission
 		 * process rather than calling put_io directly here.
 		 */
@@ -1094,14 +1094,14 @@ spdk_bdev_unclaim(struct spdk_bdev *bdev)
 }
 
 void
-spdk_bdev_io_get_rbuf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_rbuf_cb cb)
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb)
 {
 	assert(cb != NULL);
 	assert(bdev_io->u.read.iovs != NULL);
 
 	if (bdev_io->u.read.iovs[0].iov_base == NULL) {
-		bdev_io->get_rbuf_cb = cb;
-		_spdk_bdev_io_get_rbuf(bdev_io);
+		bdev_io->get_buf_cb = cb;
+		_spdk_bdev_io_get_buf(bdev_io);
 	} else {
 		cb(bdev_io->ch->channel, bdev_io);
 	}
diff --git a/lib/bdev/malloc/blockdev_malloc.c b/lib/bdev/malloc/blockdev_malloc.c
index f9644f8401..de1d8098d0 100644
--- a/lib/bdev/malloc/blockdev_malloc.c
+++ b/lib/bdev/malloc/blockdev_malloc.c
@@ -280,7 +280,7 @@ static int _blockdev_malloc_submit_request(struct spdk_io_channel *ch, struct sp
 			((struct malloc_disk *)bdev_io->bdev->ctxt)->malloc_buf +
 			bdev_io->u.read.offset;
 		bdev_io->u.read.iovs[0].iov_len = bdev_io->u.read.len;
-		bdev_io->u.read.put_rbuf = false;
+		bdev_io->u.read.put_buf = false;
 		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(bdev_io->driver_ctx),
 				      SPDK_BDEV_IO_STATUS_SUCCESS);
 		return 0;
diff --git a/lib/bdev/nvme/blockdev_nvme.c b/lib/bdev/nvme/blockdev_nvme.c
index a307273852..5103921d26 100644
--- a/lib/bdev/nvme/blockdev_nvme.c
+++ b/lib/bdev/nvme/blockdev_nvme.c
@@ -255,7 +255,7 @@ bdev_nvme_unmap(struct nvme_bdev *nbdev, struct spdk_io_channel *ch,
 		uint16_t bdesc_count);
 
 static void
-bdev_nvme_get_rbuf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
 	int ret;
 
@@ -277,7 +277,7 @@ _bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_
 {
 	switch (bdev_io->type) {
 	case SPDK_BDEV_IO_TYPE_READ:
-		spdk_bdev_io_get_rbuf(bdev_io, bdev_nvme_get_rbuf_cb);
+		spdk_bdev_io_get_buf(bdev_io, bdev_nvme_get_buf_cb);
 		return 0;
 
 	case SPDK_BDEV_IO_TYPE_WRITE:
diff --git a/lib/bdev/rbd/blockdev_rbd.c b/lib/bdev/rbd/blockdev_rbd.c
index 6ea916118b..7f04203cbd 100644
--- a/lib/bdev/rbd/blockdev_rbd.c
+++ b/lib/bdev/rbd/blockdev_rbd.c
@@ -252,7 +252,7 @@ blockdev_rbd_destruct(void *ctx)
 	return 0;
 }
 
-static void blockdev_rbd_get_rbuf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
+static void blockdev_rbd_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
 	int ret;
 
@@ -273,7 +273,7 @@ static int _blockdev_rbd_submit_request(struct spdk_io_channel *ch, struct spdk_
 {
 	switch (bdev_io->type) {
 	case SPDK_BDEV_IO_TYPE_READ:
-		spdk_bdev_io_get_rbuf(bdev_io, blockdev_rbd_get_rbuf_cb);
+		spdk_bdev_io_get_buf(bdev_io, blockdev_rbd_get_buf_cb);
 		return 0;
 
 	case SPDK_BDEV_IO_TYPE_WRITE:
diff --git a/lib/iscsi/iscsi.c b/lib/iscsi/iscsi.c
index c6e30bb64c..3db515d04b 100644
--- a/lib/iscsi/iscsi.c
+++ b/lib/iscsi/iscsi.c
@@ -2866,7 +2866,7 @@ int spdk_iscsi_conn_handle_queued_tasks(struct spdk_iscsi_conn *conn)
 			subtask = spdk_iscsi_task_get(&conn->pending_task_cnt, task);
 			assert(subtask != NULL);
 			subtask->scsi.offset = task->current_datain_offset;
-			subtask->scsi.length = DMIN32(SPDK_BDEV_LARGE_RBUF_MAX_SIZE, remaining_size);
+			subtask->scsi.length = DMIN32(SPDK_BDEV_LARGE_BUF_MAX_SIZE, remaining_size);
 			spdk_scsi_task_set_data(&subtask->scsi, NULL, 0);
 			spdk_iscsi_queue_task(conn, subtask);
 			task->current_datain_offset += subtask->scsi.length;
@@ -2888,7 +2888,7 @@ static int spdk_iscsi_op_scsi_read(struct spdk_iscsi_conn *conn,
 	task->scsi.dxfer_dir = SPDK_SCSI_DIR_FROM_DEV;
 	task->scsi.parent = NULL;
 	task->scsi.offset = 0;
-	task->scsi.length = DMIN32(SPDK_BDEV_LARGE_RBUF_MAX_SIZE, task->scsi.transfer_len);
+	task->scsi.length = DMIN32(SPDK_BDEV_LARGE_BUF_MAX_SIZE, task->scsi.transfer_len);
 	spdk_scsi_task_set_data(&task->scsi, NULL, 0);
 
 	remaining_size = task->scsi.transfer_len - task->scsi.length;
diff --git a/lib/iscsi/iscsi.h b/lib/iscsi/iscsi.h
index de9fe95b99..01c34f3581 100644
--- a/lib/iscsi/iscsi.h
+++ b/lib/iscsi/iscsi.h
@@ -90,10 +90,10 @@
 #define SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH 65536
 
 /*
- * SPDK iSCSI target will only send a maximum of SPDK_BDEV_LARGE_RBUF_MAX_SIZE data segments, even if the
+ * SPDK iSCSI target will only send a maximum of SPDK_BDEV_LARGE_BUF_MAX_SIZE data segments, even if the
 * connection can support more.
 */
-#define SPDK_ISCSI_MAX_SEND_DATA_SEGMENT_LENGTH SPDK_BDEV_LARGE_RBUF_MAX_SIZE
+#define SPDK_ISCSI_MAX_SEND_DATA_SEGMENT_LENGTH SPDK_BDEV_LARGE_BUF_MAX_SIZE
 
 /*
  * Defines maximum number of data out buffers each connection can have in
@@ -104,7 +104,7 @@
 /*
  * Defines maximum number of data in buffers each connection can have in
  * use at any given time. So this limit does not affect I/O smaller than
- * SPDK_BDEV_SMALL_RBUF_MAX_SIZE.
+ * SPDK_BDEV_SMALL_BUF_MAX_SIZE.
  */
 #define MAX_LARGE_DATAIN_PER_CONNECTION 64
 
diff --git a/test/lib/bdev/bdevperf/bdevperf.c b/test/lib/bdev/bdevperf/bdevperf.c
index cf133f8f6c..491a097f44 100644
--- a/test/lib/bdev/bdevperf/bdevperf.c
+++ b/test/lib/bdev/bdevperf/bdevperf.c
@@ -600,9 +600,9 @@ main(int argc, char **argv)
 	    !strcmp(workload_type, "reset") ||
 	    !strcmp(workload_type, "unmap")) {
 		g_rw_percentage = 50;
-		if (g_io_size > SPDK_BDEV_LARGE_RBUF_MAX_SIZE) {
-			fprintf(stderr, "Unable to exceed max I/O size of %d for verify. (%d provided).\n",
-				SPDK_BDEV_LARGE_RBUF_MAX_SIZE, g_io_size);
+		if (g_io_size > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
+			fprintf(stderr, "Unable to exceed max I/O size of %d for verify. (%d provided).\n",
+				SPDK_BDEV_LARGE_BUF_MAX_SIZE, g_io_size);
 			exit(1);
 		}
 		if (core_mask) {
@@ -652,9 +652,9 @@ main(int argc, char **argv)
 		g_is_random = 1;
 	}
 
-	if (g_io_size > SPDK_BDEV_LARGE_RBUF_MAX_SIZE) {
-		fprintf(stdout, "I/O size of %d is greather than zero copy threshold (%d).\n",
-			g_io_size, SPDK_BDEV_LARGE_RBUF_MAX_SIZE);
+	if (g_io_size > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
+		fprintf(stdout, "I/O size of %d is greather than zero copy threshold (%d).\n",
+			g_io_size, SPDK_BDEV_LARGE_BUF_MAX_SIZE);
 		fprintf(stdout, "Zero copy mechanism will not be used.\n");
 		g_zcopy = false;
 	}
diff --git a/test/lib/scsi/lun/lun_ut.c b/test/lib/scsi/lun/lun_ut.c
index 99205a6177..98e22a0a37 100644
--- a/test/lib/scsi/lun/lun_ut.c
+++ b/test/lib/scsi/lun/lun_ut.c
@@ -477,7 +477,7 @@ lun_append_task_null_lun_alloc_len_lt_4096(void)
 	task->cdb[3] = 0;
 	task->cdb[4] = 0;
 	/* alloc_len is set to a minimal value of 4096
-	 * Hence, rbuf of size 4096 is allocated */
+	 * Hence, buf of size 4096 is allocated */
 	spdk_scsi_task_process_null_lun(task);
 
 	CU_ASSERT_EQUAL(task->status, SPDK_SCSI_STATUS_GOOD);