Update iser backend code to use new ibcore APIs.

Sponsored by:	Mellanox Technologies
Hans Petter Selasky 2017-11-16 13:28:00 +00:00
parent 4f939024a8
commit 41dbd9dd1d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/bsd_rdma_4_9/; revision=325891
5 changed files with 40 additions and 122 deletions


@@ -395,7 +395,7 @@ iser_conn_connect(struct icl_conn *ic, int domain, int socktype,
 iser_conn->state = ISER_CONN_PENDING;
-ib_conn->cma_id = rdma_create_id(iser_cma_handler, (void *)iser_conn,
+ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler, (void *)iser_conn,
 RDMA_PS_TCP, IB_QPT_RC);
 if (IS_ERR(ib_conn->cma_id)) {
 err = -PTR_ERR(ib_conn->cma_id);
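Note: the only functional change in this hunk is the extra first argument. The 4.9-era ibcore threads a network namespace through CM ID creation, and an in-kernel consumer simply passes &init_net. A minimal sketch of the new call pattern (the handler and wrapper names below are placeholders, not part of this commit):

    #include <rdma/rdma_cm.h>

    static int my_cma_handler(struct rdma_cm_id *id,
        struct rdma_cm_event *event);

    static struct rdma_cm_id *
    my_create_cm_id(void *context)
    {
            /* 4.9 API: the first argument selects the network namespace. */
            return (rdma_create_id(&init_net, my_cma_handler, context,
                RDMA_PS_TCP, IB_QPT_RC));
    }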


@@ -245,7 +245,7 @@ enum iser_desc_type {
 struct iser_data_buf {
 struct scatterlist sgl[ISCSI_ISER_SG_TABLESIZE];
 void *sg;
-unsigned int size;
+int size;
 unsigned long data_len;
 unsigned int dma_nents;
 char *copy_buf;
@@ -364,12 +364,10 @@ struct iser_device {
 * struct iser_reg_resources - Fast registration recources
 *
 * @mr: memory region
-* @frpl: fast reg page list
 * @mr_valid: is mr valid indicator
 */
 struct iser_reg_resources {
 struct ib_mr *mr;
-struct ib_fast_reg_page_list *frpl;
 u8 mr_valid:1;
 };
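Note: both header changes fall out of the new registration path. The core now builds the MR's page table itself through ib_map_mr_sg(), so the per-descriptor fast-reg page list disappears, and size becomes a plain int because that is how ib_map_mr_sg() takes the SG entry count. For reference, the prototype this structure now has to feed, as found in the 4.9-based ibcore headers (sg_offset may be NULL):

    int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
        unsigned int *sg_offset, unsigned int page_size);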


@@ -51,64 +51,6 @@ iser_reg_desc_put(struct ib_conn *ib_conn,
 #define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
-/**
-* iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
-* and returns the length of resulting physical address array (may be less than
-* the original due to possible compaction).
-*
-* we build a "page vec" under the assumption that the SG meets the RDMA
-* alignment requirements. Other then the first and last SG elements, all
-* the "internal" elements can be compacted into a list whose elements are
-* dma addresses of physical pages. The code supports also the weird case
-* where --few fragments of the same page-- are present in the SG as
-* consecutive elements. Also, it handles one entry SG.
-*/
-static int
-iser_sg_to_page_vec(struct iser_data_buf *data,
-struct ib_device *ibdev, u64 *pages,
-int *offset, int *data_size)
-{
-struct scatterlist *sg, *sgl = data->sgl;
-u64 start_addr, end_addr, page, chunk_start = 0;
-unsigned long total_sz = 0;
-unsigned int dma_len;
-int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
-/* compute the offset of first element */
-*offset = (u64) sgl[0].offset & ~MASK_4K;
-new_chunk = 1;
-cur_page = 0;
-for_each_sg(sgl, sg, data->dma_nents, i) {
-start_addr = ib_sg_dma_address(ibdev, sg);
-if (new_chunk)
-chunk_start = start_addr;
-dma_len = ib_sg_dma_len(ibdev, sg);
-end_addr = start_addr + dma_len;
-total_sz += dma_len;
-/* collect page fragments until aligned or end of SG list */
-if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
-new_chunk = 0;
-continue;
-}
-new_chunk = 1;
-/* address of the first page in the contiguous chunk;
-masking relevant for the very first SG entry,
-which might be unaligned */
-page = chunk_start & MASK_4K;
-do {
-pages[cur_page++] = page;
-page += SIZE_4K;
-} while (page < end_addr);
-}
-*data_size = total_sz;
-return (cur_page);
-}
 /**
 * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
 * for RDMA sub-list of a scatter-gather list of memory buffers, and returns
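Note: the hand-rolled page-vector builder removed above is exactly what ib_map_mr_sg() replaces. The core walks the scatterlist, enforces the requested page size, and loads the resulting page list into the MR; the caller only has to verify that every SG entry was consumed. A hedged sketch of that check (mr, sg and nents are placeholders):

    int n;

    n = ib_map_mr_sg(mr, sg, nents, NULL, SIZE_4K);
    if (n < 0)
            return (n);             /* mapping failed */
    if (n != nents)
            return (-EINVAL);       /* SG list only partially mapped */
    /* mr->iova and mr->length now describe the registered region. */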
@@ -214,46 +156,41 @@ iser_fast_reg_mr(struct icl_iser_pdu *iser_pdu,
 {
 struct ib_conn *ib_conn = &iser_pdu->iser_conn->ib_conn;
 struct iser_device *device = ib_conn->device;
-struct ib_send_wr fastreg_wr, inv_wr;
+struct ib_mr *mr = rsc->mr;
+struct ib_reg_wr fastreg_wr;
+struct ib_send_wr inv_wr;
 struct ib_send_wr *bad_wr, *wr = NULL;
-int ret, offset, size, plen;
+int ret, n;
 /* if there a single dma entry, dma mr suffices */
 if (mem->dma_nents == 1)
 return iser_reg_dma(device, mem, reg);
-/* rsc is not null */
-plen = iser_sg_to_page_vec(mem, device->ib_device,
-rsc->frpl->page_list,
-&offset, &size);
-if (plen * SIZE_4K < size) {
-ISER_ERR("fast reg page_list too short to hold this SG");
-return (EINVAL);
-}
 if (!rsc->mr_valid) {
-iser_inv_rkey(&inv_wr, rsc->mr);
+iser_inv_rkey(&inv_wr, mr);
 wr = &inv_wr;
 }
+n = ib_map_mr_sg(mr, mem->sg, mem->size, NULL, SIZE_4K);
+if (unlikely(n != mem->size)) {
+ISER_ERR("failed to map sg (%d/%d)\n", n, mem->size);
+return n < 0 ? n : -EINVAL;
+}
 /* Prepare FASTREG WR */
 memset(&fastreg_wr, 0, sizeof(fastreg_wr));
-fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
-fastreg_wr.opcode = IB_WR_FAST_REG_MR;
-fastreg_wr.wr.fast_reg.iova_start = rsc->frpl->page_list[0] + offset;
-fastreg_wr.wr.fast_reg.page_list = rsc->frpl;
-fastreg_wr.wr.fast_reg.page_list_len = plen;
-fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
-fastreg_wr.wr.fast_reg.length = size;
-fastreg_wr.wr.fast_reg.rkey = rsc->mr->rkey;
-fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
-IB_ACCESS_REMOTE_WRITE |
-IB_ACCESS_REMOTE_READ);
+fastreg_wr.wr.opcode = IB_WR_REG_MR;
+fastreg_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
+fastreg_wr.wr.num_sge = 0;
+fastreg_wr.mr = mr;
+fastreg_wr.key = mr->rkey;
+fastreg_wr.access = IB_ACCESS_LOCAL_WRITE |
+IB_ACCESS_REMOTE_WRITE |
+IB_ACCESS_REMOTE_READ;
 if (!wr)
-wr = &fastreg_wr;
+wr = &fastreg_wr.wr;
 else
-wr->next = &fastreg_wr;
+wr->next = &fastreg_wr.wr;
 ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
 if (ret) {
@@ -262,10 +199,10 @@ iser_fast_reg_mr(struct icl_iser_pdu *iser_pdu,
 }
 rsc->mr_valid = 0;
-reg->sge.lkey = rsc->mr->lkey;
-reg->rkey = rsc->mr->rkey;
-reg->sge.addr = rsc->frpl->page_list[0] + offset;
-reg->sge.length = size;
+reg->sge.lkey = mr->lkey;
+reg->rkey = mr->rkey;
+reg->sge.addr = mr->iova;
+reg->sge.length = mr->length;
 return (ret);
 }
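Note: for readers unfamiliar with the post-4.9 verbs, the registration work request is now the dedicated struct ib_reg_wr, which embeds a generic ib_send_wr, and a stale mapping is dropped by chaining an IB_WR_LOCAL_INV in front of it, exactly as the hunks above do. A self-contained, hedged sketch of that posting pattern (qp, mr and the wr_id cookie come from the caller; error handling is trimmed):

    #include <rdma/ib_verbs.h>

    static int
    post_fastreg(struct ib_qp *qp, struct ib_mr *mr, int need_inval)
    {
            struct ib_send_wr inv_wr, *bad_wr, *first;
            struct ib_reg_wr reg_wr;

            memset(&reg_wr, 0, sizeof(reg_wr));
            reg_wr.wr.opcode = IB_WR_REG_MR;
            reg_wr.wr.wr_id = 0;            /* caller-defined completion cookie */
            reg_wr.wr.num_sge = 0;
            reg_wr.mr = mr;
            reg_wr.key = mr->rkey;
            reg_wr.access = IB_ACCESS_LOCAL_WRITE |
                IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;

            if (need_inval) {
                    /* Invalidate the previous registration before reusing the MR. */
                    memset(&inv_wr, 0, sizeof(inv_wr));
                    inv_wr.opcode = IB_WR_LOCAL_INV;
                    inv_wr.ex.invalidate_rkey = mr->rkey;
                    inv_wr.next = &reg_wr.wr;
                    first = &inv_wr;
            } else {
                    first = &reg_wr.wr;
            }
            return (ib_post_send(qp, first, &bad_wr));
    }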


@@ -200,16 +200,10 @@ iser_cq_callback(struct ib_cq *cq, void *cq_context)
 static int
 iser_create_device_ib_res(struct iser_device *device)
 {
-struct ib_device_attr *dev_attr = &device->dev_attr;
-int ret, i, max_cqe;
+struct ib_device *ib_dev = device->ib_device;
+int i, max_cqe;
-ret = ib_query_device(device->ib_device, dev_attr);
-if (ret) {
-ISER_ERR("Query device failed for %s", device->ib_device->name);
-return (ret);
-}
-if (!(dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
 ISER_ERR("device %s doesn't support Fastreg, "
 "can't register memory", device->ib_device->name);
 return (1);
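Note: ib_query_device() no longer exists in the 4.9 API; the core fills in a struct ib_device_attr at device registration time and caches it as ib_device->attrs, so the old query-then-check sequence collapses into plain field reads. Hedged sketch:

    struct ib_device_attr *attrs = &device->ib_device->attrs;

    if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
            return (1);             /* no fast registration support */
    max_cqe = min(ISER_MAX_CQ_LEN, attrs->max_cqe);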
@@ -222,25 +216,29 @@ iser_create_device_ib_res(struct iser_device *device)
 if (!device->comps)
 goto comps_err;
-max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
+max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);
 ISER_DBG("using %d CQs, device %s supports %d vectors max_cqe %d",
 device->comps_used, device->ib_device->name,
 device->ib_device->num_comp_vectors, max_cqe);
-device->pd = ib_alloc_pd(device->ib_device);
+device->pd = ib_alloc_pd(device->ib_device, IB_PD_UNSAFE_GLOBAL_RKEY);
 if (IS_ERR(device->pd))
 goto pd_err;
 for (i = 0; i < device->comps_used; i++) {
 struct iser_comp *comp = &device->comps[i];
+struct ib_cq_init_attr cq_attr = {
+.cqe = max_cqe,
+.comp_vector = i,
+};
 comp->device = device;
 comp->cq = ib_create_cq(device->ib_device,
 iser_cq_callback,
 iser_cq_event_callback,
 (void *)comp,
-max_cqe, i);
+&cq_attr);
 if (IS_ERR(comp->cq)) {
 comp->cq = NULL;
 goto cq_err;
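Note: ib_create_cq() now takes a struct ib_cq_init_attr rather than separate cqe/comp_vector arguments, which is what makes the per-vector cq_attr block above necessary; the extra flags field is simply left at zero here. Hedged sketch of the new call, reusing the callback names from this driver:

    struct ib_cq_init_attr cq_attr = {
            .cqe = max_cqe,
            .comp_vector = i,
            .flags = 0,
    };

    comp->cq = ib_create_cq(device->ib_device, iser_cq_callback,
        iser_cq_event_callback, (void *)comp, &cq_attr);
    if (IS_ERR(comp->cq))
            return (PTR_ERR(comp->cq));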
@@ -257,9 +255,7 @@ iser_create_device_ib_res(struct iser_device *device)
 taskqueue_start_threads(&comp->tq, 1, PI_NET, "iser taskq");
 }
-device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
-IB_ACCESS_REMOTE_WRITE |
-IB_ACCESS_REMOTE_READ);
+device->mr = device->pd->__internal_mr;
 if (IS_ERR(device->mr))
 goto tq_err;
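Note: ib_alloc_pd() grew a flags argument in this API generation. Passing IB_PD_UNSAFE_GLOBAL_RKEY makes the core allocate the catch-all DMA memory region itself, which is why ib_get_dma_mr() disappears and the driver simply borrows the PD's internal MR. Hedged sketch:

    pd = ib_alloc_pd(ib_dev, IB_PD_UNSAFE_GLOBAL_RKEY);
    if (IS_ERR(pd))
            return (PTR_ERR(pd));

    /* Owned by the PD: released by ib_dealloc_pd(), never ib_dereg_mr(). */
    mr = pd->__internal_mr;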
@@ -327,35 +323,21 @@ iser_alloc_reg_res(struct ib_device *ib_device,
 {
 int ret;
-res->frpl = ib_alloc_fast_reg_page_list(ib_device,
-ISCSI_ISER_SG_TABLESIZE + 1);
-if (IS_ERR(res->frpl)) {
-ret = -PTR_ERR(res->frpl);
-ISER_ERR("Failed to allocate fast reg page list err=%d", ret);
-return (ret);
-}
-res->mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
+res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, ISCSI_ISER_SG_TABLESIZE + 1);
 if (IS_ERR(res->mr)) {
 ret = -PTR_ERR(res->mr);
 ISER_ERR("Failed to allocate fast reg mr err=%d", ret);
-goto fast_reg_mr_failure;
+return (ret);
 }
 res->mr_valid = 1;
 return (0);
-fast_reg_mr_failure:
-ib_free_fast_reg_page_list(res->frpl);
-return (ret);
 }
 static void
 iser_free_reg_res(struct iser_reg_resources *rsc)
 {
 ib_dereg_mr(rsc->mr);
-ib_free_fast_reg_page_list(rsc->frpl);
 }
 static struct fast_reg_descriptor *
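Note: ib_alloc_fast_reg_mr() and ib_alloc_fast_reg_page_list() are folded into the generic ib_alloc_mr(), which takes the MR type and the maximum number of SG entries up front; teardown becomes a single ib_dereg_mr(), with no page list left to free. Hedged sketch:

    struct ib_mr *mr;

    mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, ISCSI_ISER_SG_TABLESIZE + 1);
    if (IS_ERR(mr))
            return (-PTR_ERR(mr));
    /* ... use mr with ib_map_mr_sg() / IB_WR_REG_MR ... */
    ib_dereg_mr(mr);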


@@ -20,6 +20,7 @@ SRCS+= icl_conn_if.h
 CFLAGS+= -I${SRCTOP}/sys/
 CFLAGS+= -I${SYSDIR}/ofed/include
+CFLAGS+= -I${SYSDIR}/ofed/include/uapi
 CFLAGS+= -I${SYSDIR}/compat/linuxkpi/common/include
 CFLAGS+= -DCONFIG_INFINIBAND_USER_MEM
 CFLAGS+= -DINET6 -DINET