nvmf: Rearrange spdk_nvmf_request for cache line locality
Get all of the hot stuff onto the first cache line:

* Shrink the xfer enum to one byte (it only has 3 values).
* Pull the dif enabled flag out of the dif structure so it can be accessed separately.
* Rearrange the members.

Change-Id: Id4a2fe90a49c055a4672642faac0028671ebfae9
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7827
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
parent d33f44866d
commit 15ae31fb0c
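Editor's note: the commit message claims the per-I/O "hot" members now fit in the first cache line. A minimal, illustrative sketch of how that could be checked at compile time is shown below; it is not part of this patch, it assumes a 64-byte cache line and that struct spdk_nvmf_request lives in spdk/nvmf_transport.h as in this tree, and it only uses SPDK_STATIC_ASSERT from spdk/assert.h plus the member names introduced by the diff.

	#include <stddef.h>			/* offsetof */
	#include "spdk/assert.h"		/* SPDK_STATIC_ASSERT */
	#include "spdk/nvmf_transport.h"	/* struct spdk_nvmf_request */

	/* Hypothetical guard, not in the patch: everything up to and including
	 * the rsp pointer should sit in the first 64 bytes, i.e. one cache line
	 * on typical x86_64 parts. */
	SPDK_STATIC_ASSERT(offsetof(struct spdk_nvmf_request, rsp) + sizeof(void *) <= 64,
			   "hot members of spdk_nvmf_request spill past the first cache line");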
@@ -74,7 +74,6 @@ SPDK_STATIC_ASSERT(sizeof(union nvmf_c2h_msg) == 16, "Incorrect size");
 
 struct spdk_nvmf_dif_info {
 	struct spdk_dif_ctx		dif_ctx;
-	bool				dif_insert_or_strip;
 	uint32_t			elba_length;
 	uint32_t			orig_length;
 };
@@ -82,23 +81,27 @@ struct spdk_nvmf_dif_info {
 struct spdk_nvmf_request {
 	struct spdk_nvmf_qpair		*qpair;
 	uint32_t			length;
-	enum spdk_nvme_data_transfer	xfer;
+	uint8_t				xfer; /* type enum spdk_nvme_data_transfer */
+	bool				data_from_pool;
+	bool				dif_enabled;
 	void				*data;
 	union nvmf_h2c_msg		*cmd;
 	union nvmf_c2h_msg		*rsp;
-	void				*buffers[NVMF_REQ_MAX_BUFFERS];
-	struct iovec			iov[NVMF_REQ_MAX_BUFFERS];
-	STAILQ_ENTRY(spdk_nvmf_request)	buf_link;
-	uint64_t			timeout_tsc;
+
 	uint32_t			iovcnt;
-	bool				data_from_pool;
-	struct spdk_bdev_io_wait_entry	bdev_io_wait;
+	struct iovec			iov[NVMF_REQ_MAX_BUFFERS];
+	void				*buffers[NVMF_REQ_MAX_BUFFERS];
+
 	struct spdk_nvmf_dif_info	dif;
+
+	struct spdk_bdev_io_wait_entry	bdev_io_wait;
 	spdk_nvmf_nvme_passthru_cmd_cb	cmd_cb_fn;
 	struct spdk_nvmf_request	*first_fused_req;
 	struct spdk_nvmf_request	*req_to_abort;
 	struct spdk_poller		*poller;
+	uint64_t			timeout_tsc;
+
+	STAILQ_ENTRY(spdk_nvmf_request)	buf_link;
 	TAILQ_ENTRY(spdk_nvmf_request)	link;
 };
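A side note on the new one-byte xfer field above: the /* type enum spdk_nvme_data_transfer */ comment documents the intent, and a guard like the sketch below (not part of this change; it assumes SPDK_NVME_DATA_BIDIRECTIONAL remains the largest enumerator, as defined in spdk/nvme_spec.h) would catch the enum ever outgrowing uint8_t.

	#include <stdint.h>		/* UINT8_MAX */
	#include "spdk/assert.h"	/* SPDK_STATIC_ASSERT */
	#include "spdk/nvme_spec.h"	/* enum spdk_nvme_data_transfer */

	/* Illustrative check only: storing the transfer direction in a uint8_t
	 * is safe as long as every enumerator fits in one byte. */
	SPDK_STATIC_ASSERT(SPDK_NVME_DATA_BIDIRECTIONAL <= UINT8_MAX,
			   "spdk_nvme_data_transfer no longer fits in uint8_t");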
@@ -1451,7 +1451,7 @@ nvmf_rdma_fill_wr_sgl(struct spdk_nvmf_rdma_poll_group *rgroup,
 	uint32_t lkey, remaining;
 	int rc;
 
-	if (spdk_unlikely(rdma_req->req.dif.dif_insert_or_strip)) {
+	if (spdk_unlikely(rdma_req->req.dif_enabled)) {
 		dif_ctx = &rdma_req->req.dif.dif_ctx;
 		remaining_data_block = dif_ctx->block_size - dif_ctx->md_size;
 	}
@@ -1590,7 +1590,7 @@ nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 
 	rdma_req->iovpos = 0;
 
-	if (spdk_unlikely(req->dif.dif_insert_or_strip)) {
+	if (spdk_unlikely(req->dif_enabled)) {
 		num_wrs = nvmf_rdma_calc_num_wrs(length, rtransport->transport.opts.io_unit_size,
 						 req->dif.dif_ctx.block_size);
 		if (num_wrs > 1) {
@@ -1649,7 +1649,7 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtranspor
 
 	desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
 	for (i = 0; i < num_sgl_descriptors; i++) {
-		if (spdk_likely(!req->dif.dif_insert_or_strip)) {
+		if (spdk_likely(!req->dif_enabled)) {
			lengths[i] = desc->keyed.length;
		} else {
			req->dif.orig_length += desc->keyed.length;
@@ -1763,7 +1763,7 @@ nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
 		/* fill request length and populate iovs */
 		req->length = length;
 
-		if (spdk_unlikely(req->dif.dif_insert_or_strip)) {
+		if (spdk_unlikely(req->dif_enabled)) {
			req->dif.orig_length = length;
			length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
			req->dif.elba_length = length;
@@ -1936,7 +1936,7 @@ nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 		}
 
 		if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&rdma_req->req, &rdma_req->req.dif.dif_ctx))) {
-			rdma_req->req.dif.dif_insert_or_strip = true;
+			rdma_req->req.dif_enabled = true;
 		}
 
 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
@@ -2039,7 +2039,7 @@ nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 		spdk_trace_record(TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE, 0, 0,
				  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
 
-		if (spdk_unlikely(rdma_req->req.dif.dif_insert_or_strip)) {
+		if (spdk_unlikely(rdma_req->req.dif_enabled)) {
			if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
				/* generate DIF for write operation */
				num_blocks = SPDK_CEIL_DIV(rdma_req->req.dif.elba_length, rdma_req->req.dif.dif_ctx.block_size);
@@ -2079,7 +2079,7 @@ nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 		} else {
			rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
		}
-		if (spdk_unlikely(rdma_req->req.dif.dif_insert_or_strip)) {
+		if (spdk_unlikely(rdma_req->req.dif_enabled)) {
			/* restore the original length */
			rdma_req->req.length = rdma_req->req.dif.orig_length;
@@ -380,7 +380,7 @@ nvmf_tcp_req_get(struct spdk_nvmf_tcp_qpair *tqpair)
 	memset(&tcp_req->rsp, 0, sizeof(tcp_req->rsp));
 	tcp_req->h2c_offset = 0;
 	tcp_req->has_incapsule_data = false;
-	tcp_req->req.dif.dif_insert_or_strip = false;
+	tcp_req->req.dif_enabled = false;
 
 	TAILQ_REMOVE(&tqpair->tcp_req_free_queue, tcp_req, state_link);
 	TAILQ_INSERT_TAIL(&tqpair->tcp_req_working_queue, tcp_req, state_link);
@@ -1464,7 +1464,7 @@ nvmf_tcp_h2c_data_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport,
 
 	pdu->req = tcp_req;
 
-	if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
+	if (spdk_unlikely(tcp_req->req.dif_enabled)) {
 		pdu->dif_ctx = &tcp_req->req.dif.dif_ctx;
 	}
 
@@ -2110,7 +2110,7 @@ nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_req *tcp_req,
 
 	SPDK_DEBUGLOG(nvmf_tcp, "Data requested length= 0x%x\n", length);
 
-	if (spdk_unlikely(req->dif.dif_insert_or_strip)) {
+	if (spdk_unlikely(req->dif_enabled)) {
 		req->dif.orig_length = length;
 		length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
 		req->dif.elba_length = length;
@@ -2175,7 +2175,7 @@ nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_req *tcp_req,
 		req->length = length;
 		req->data_from_pool = false;
 
-		if (spdk_unlikely(req->dif.dif_insert_or_strip)) {
+		if (spdk_unlikely(req->dif_enabled)) {
			length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
			req->dif.elba_length = length;
		}
@@ -2268,7 +2268,7 @@ _nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
 
 	c2h_data->common.plen = plen;
 
-	if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
+	if (spdk_unlikely(tcp_req->req.dif_enabled)) {
 		rsp_pdu->dif_ctx = &tcp_req->req.dif.dif_ctx;
 	}
 
@@ -2283,7 +2283,7 @@ _nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
 		c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS;
 	}
 
-	if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
+	if (spdk_unlikely(tcp_req->req.dif_enabled)) {
 		struct spdk_nvme_cpl *rsp = &tcp_req->req.rsp->nvme_cpl;
 		struct spdk_dif_error err_blk = {};
 		uint32_t mapped_length = 0;
@@ -2441,7 +2441,7 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 			tcp_req->cmd = tqpair->pdu_in_progress->hdr.capsule_cmd.ccsqe;
 
			if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&tcp_req->req, &tcp_req->req.dif.dif_ctx))) {
-				tcp_req->req.dif.dif_insert_or_strip = true;
+				tcp_req->req.dif_enabled = true;
				tqpair->pdu_in_progress->dif_ctx = &tcp_req->req.dif.dif_ctx;
			}
 
@@ -2542,7 +2542,7 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 		case TCP_REQUEST_STATE_READY_TO_EXECUTE:
			spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, 0, 0, (uintptr_t)tcp_req, 0);
 
-			if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
+			if (spdk_unlikely(tcp_req->req.dif_enabled)) {
				assert(tcp_req->req.dif.elba_length >= tcp_req->req.length);
				tcp_req->req.length = tcp_req->req.dif.elba_length;
			}
@@ -2558,7 +2558,7 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 		case TCP_REQUEST_STATE_EXECUTED:
			spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTED, 0, 0, (uintptr_t)tcp_req, 0);
 
-			if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) {
+			if (spdk_unlikely(tcp_req->req.dif_enabled)) {
				tcp_req->req.length = tcp_req->req.dif.orig_length;
			}
 
@@ -913,7 +913,7 @@ test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
-	rdma_req.req.dif.dif_insert_or_strip = true;
+	rdma_req.req.dif_enabled = true;
 	rtransport.transport.opts.io_unit_size = data_bs * 8;
 	sgl->keyed.length = data_bs * 4;
 
@@ -943,7 +943,7 @@ test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
-	rdma_req.req.dif.dif_insert_or_strip = true;
+	rdma_req.req.dif_enabled = true;
 	rtransport.transport.opts.io_unit_size = data_bs * 4;
 	sgl->keyed.length = data_bs * 4;
 
@@ -980,7 +980,7 @@ test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
-	rdma_req.req.dif.dif_insert_or_strip = true;
+	rdma_req.req.dif_enabled = true;
 	rtransport.transport.opts.io_unit_size = data_bs;
 	sgl->keyed.length = data_bs;
 
@@ -1015,7 +1015,7 @@ test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
-	rdma_req.req.dif.dif_insert_or_strip = true;
+	rdma_req.req.dif_enabled = true;
 	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
 	sgl->keyed.length = data_bs * 4;
 
@@ -1045,7 +1045,7 @@ test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
-	rdma_req.req.dif.dif_insert_or_strip = true;
+	rdma_req.req.dif_enabled = true;
 	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
 	sgl->keyed.length = data_bs * 4;
 
@@ -1078,7 +1078,7 @@ test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
-	rdma_req.req.dif.dif_insert_or_strip = true;
+	rdma_req.req.dif_enabled = true;
 	rtransport.transport.opts.io_unit_size = data_bs * 4;
 	sgl->keyed.length = data_bs * 6;
 
@@ -1126,7 +1126,7 @@ test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
-	rdma_req.req.dif.dif_insert_or_strip = true;
+	rdma_req.req.dif_enabled = true;
 	rtransport.transport.opts.io_unit_size = data_bs * 16;
 	sgl->keyed.length = data_bs * 16;
 
@@ -1169,7 +1169,7 @@ test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
-	rdma_req.req.dif.dif_insert_or_strip = true;
+	rdma_req.req.dif_enabled = true;
 	rtransport.transport.opts.io_unit_size = 516;
 	sgl->keyed.length = data_bs * 2;
 
@@ -1210,7 +1210,7 @@ test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1,
			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
-	rdma_req.req.dif.dif_insert_or_strip = true;
+	rdma_req.req.dif_enabled = true;
 	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
 	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
 