qlnxr: cleanup secondary effects from EOL purge

DEFINE_ALLOC_MR became unconditionally defined, but it isn't used
anywhere now.  Several places depended upon DEFINE_IB_FAST_REG, but
that is now always false.  Similarly, DEFINE_IB_UMEM_WITH_CHUNK is now
always false/undefined, while DEFINE_IB_AH_ATTR_WITH_DMAC is
unconditionally true.  Remove the dead definitions and branches.

Reviewed by: imp
Pull Request: https://github.com/freebsd/freebsd-src/pull/603
Differential Revision: https://reviews.freebsd.org/D35560
Commit: 21e0b50796 (parent bbe35708ad)
Author: Elliott Mitchell, 2022-06-24 16:26:18 -07:00; committed by Warner Losh
3 files changed, 29 insertions(+), 85 deletions(-)
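As a standalone sketch of the pattern being purged (HAVE_DMAC_IN_AH_ATTR
is a hypothetical stand-in, not a macro from the driver): once a
feature-test macro can only ever evaluate one way, the #if keeps an arm
the compiler can never see, so both the conditional and the dead arm can
be flattened away.

/*
 * Illustrative only: after an EOL purge the feature-test macro is
 * constant, so the #else arm below is dead code.
 */
#include <stdio.h>

#define HAVE_DMAC_IN_AH_ATTR 1	/* hypothetical; always 1 now */

int main(void)
{
#if HAVE_DMAC_IN_AH_ATTR
	/* live arm: the only code that is ever compiled now */
	puts("copy the dmac straight out of ah_attr");
#else
	/* dead arm: unreachable once the macro is constant-true;
	 * this commit deletes exactly this kind of residue */
	puts("derive the dmac with a helper");
#endif
	return 0;
}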

--- a/sys/dev/qlnx/qlnxr/qlnxr_cm.c
+++ b/sys/dev/qlnx/qlnxr/qlnxr_cm.c

@@ -498,9 +498,6 @@ qlnxr_gsi_build_header(struct qlnxr_dev *dev,
 	int ip_ver = 0;
 	bool has_udp = false;
-#if !DEFINE_IB_AH_ATTR_WITH_DMAC
-	u8 mac[ETH_ALEN];
-#endif
 	int i;
 
 	send_size = 0;
@@ -525,12 +522,7 @@ qlnxr_gsi_build_header(struct qlnxr_dev *dev,
 	}
 	/* ENET + VLAN headers*/
-#if DEFINE_IB_AH_ATTR_WITH_DMAC
 	memcpy(udh->eth.dmac_h, ah_attr->dmac, ETH_ALEN);
-#else
-	qlnxr_get_dmac(dev, ah_attr, mac);
-	memcpy(udh->eth.dmac_h, mac, ETH_ALEN);
-#endif
 	memcpy(udh->eth.smac_h, dev->ha->primary_mac, ETH_ALEN);
 
 	if (has_vlan) {
 		udh->eth.type = htons(ETH_P_8021Q);

--- a/sys/dev/qlnx/qlnxr/qlnxr_def.h
+++ b/sys/dev/qlnx/qlnxr/qlnxr_def.h

@@ -623,15 +623,6 @@ struct mr_info {
 	u32 completed_handled;
 };
 
-#define DEFINE_ALLOC_MR
-
-#ifdef DEFINE_IB_FAST_REG
-struct qlnxr_fast_reg_page_list {
-	struct ib_fast_reg_page_list ibfrpl;
-	struct qlnxr_dev *dev;
-	struct mr_info info;
-};
-#endif
 struct qlnxr_qp {
 	struct ib_qp ibqp; /* must be first */
 	struct qlnxr_dev *dev;
@@ -675,9 +666,6 @@ struct qlnxr_qp {
 		bool signaled;
 		dma_addr_t icrc_mapping;
 		u32 *icrc;
-#ifdef DEFINE_IB_FAST_REG
-		struct qlnxr_fast_reg_page_list *frmr;
-#endif
 		struct qlnxr_mr *mr;
 	} *wqe_wr_id;
@@ -828,14 +816,6 @@ static inline bool qlnxr_qp_has_rq(struct qlnxr_qp *qp)
 	return 1;
 }
 
-#ifdef DEFINE_IB_FAST_REG
-static inline struct qlnxr_fast_reg_page_list *get_qlnxr_frmr_list(
-	struct ib_fast_reg_page_list *ifrpl)
-{
-	return container_of(ifrpl, struct qlnxr_fast_reg_page_list, ibfrpl);
-}
-#endif
-
 #define SET_FIELD2(value, name, flag) \
 	do { \
 		(value) |= ((flag) << (name ## _SHIFT)); \
@@ -887,8 +867,6 @@ extern int qlnx_rdma_ll2_set_mac_filter(void *rdma_ctx, uint8_t *old_mac_address
 #define QLNXR_ROCE_PKEY_TABLE_LEN 1
 #define QLNXR_ROCE_PKEY_DEFAULT 0xffff
 
-#define DEFINE_IB_AH_ATTR_WITH_DMAC (1)
-
 #define QLNX_IS_IWARP(rdev)	IS_IWARP(ECORE_LEADING_HWFN(rdev->cdev))
 #define QLNX_IS_ROCE(rdev)	IS_ROCE(ECORE_LEADING_HWFN(rdev->cdev))

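An aside on the header above: the deleted get_qlnxr_frmr_list() and the
"must be first" comment on ibqp are both instances of the container_of
embedding idiom, where the Verbs layer hands back a pointer to the
embedded ib_* object and the driver recovers its wrapper structure
around it. A minimal, self-contained sketch with generic stand-in types
(not the driver's own):

#include <stddef.h>
#include <stdio.h>

/* userspace re-implementation of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_obj { int handle; };		/* stand-in for struct ib_qp */
struct drv_obj {			/* stand-in for struct qlnxr_qp */
	struct ib_obj ib;		/* embedded; first member */
	int private_state;
};

int main(void)
{
	struct drv_obj qp = { .ib = { .handle = 7 }, .private_state = 42 };
	struct ib_obj *ib = &qp.ib;	/* what the upper layer sees */

	/* recover the wrapping driver object from the embedded one */
	struct drv_obj *back = container_of(ib, struct drv_obj, ib);
	printf("handle=%d state=%d\n", back->ib.handle, back->private_state);
	return 0;
}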
--- a/sys/dev/qlnx/qlnxr/qlnxr_verbs.c
+++ b/sys/dev/qlnx/qlnxr/qlnxr_verbs.c

@@ -1293,13 +1293,7 @@ qlnxr_populate_pbls(struct qlnxr_dev *dev, struct ib_umem *umem,
 	struct scatterlist *sg;
 	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
 	qlnx_host_t *ha;
-
-#ifdef DEFINE_IB_UMEM_WITH_CHUNK
-	int i;
-	struct ib_umem_chunk *chunk = NULL;
-#else
 	int entry;
-#endif
 
 	ha = dev->ha;
@@ -1333,53 +1327,42 @@ qlnxr_populate_pbls(struct qlnxr_dev *dev, struct ib_umem *umem,
 
 	shift = ilog2(umem->page_size);
 
-#ifndef DEFINE_IB_UMEM_WITH_CHUNK
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-#else
-	list_for_each_entry(chunk, &umem->chunk_list, list) {
-		/* get all the dma regions from the chunk. */
-		for (i = 0; i < chunk->nmap; i++) {
-			sg = &chunk->page_list[i];
-#endif
-			pages = sg_dma_len(sg) >> shift;
-			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
-				/* store the page address in pbe */
-				pbe->lo =
-				    cpu_to_le32(sg_dma_address(sg) +
-					(umem->page_size * pg_cnt));
-				pbe->hi =
-				    cpu_to_le32(upper_32_bits
-					((sg_dma_address(sg) +
-					umem->page_size * pg_cnt)));
+		pages = sg_dma_len(sg) >> shift;
+		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+			/* store the page address in pbe */
+			pbe->lo =
+			    cpu_to_le32(sg_dma_address(sg) +
+				(umem->page_size * pg_cnt));
+			pbe->hi =
+			    cpu_to_le32(upper_32_bits
+				((sg_dma_address(sg) +
+				umem->page_size * pg_cnt)));
 
-				QL_DPRINT12(ha,
-					"Populate pbl table:"
-					" pbe->addr=0x%x:0x%x "
-					" pbe_cnt = %d total_num_pbes=%d"
-					" pbe=%p\n", pbe->lo, pbe->hi, pbe_cnt,
-					total_num_pbes, pbe);
+			QL_DPRINT12(ha,
+				"Populate pbl table:"
+				" pbe->addr=0x%x:0x%x "
+				" pbe_cnt = %d total_num_pbes=%d"
+				" pbe=%p\n", pbe->lo, pbe->hi, pbe_cnt,
+				total_num_pbes, pbe);
 
-				pbe_cnt ++;
-				total_num_pbes ++;
-				pbe++;
+			pbe_cnt ++;
+			total_num_pbes ++;
+			pbe++;
 
-				if (total_num_pbes == pbl_info->num_pbes)
-					return;
+			if (total_num_pbes == pbl_info->num_pbes)
+				return;
 
-				/* if the given pbl is full storing the pbes,
-				 * move to next pbl.
-				 */
-				if (pbe_cnt ==
-				    (pbl_info->pbl_size / sizeof(u64))) {
-					pbl_tbl++;
-					pbe = (struct regpair *)pbl_tbl->va;
-					pbe_cnt = 0;
-				}
-			}
-#ifdef DEFINE_IB_UMEM_WITH_CHUNK
-		}
-#endif
+			/* if the given pbl is full storing the pbes,
+			 * move to next pbl.
+			 */
+			if (pbe_cnt ==
+			    (pbl_info->pbl_size / sizeof(u64))) {
+				pbl_tbl++;
+				pbe = (struct regpair *)pbl_tbl->va;
+				pbe_cnt = 0;
+			}
+		}
 	}
 
 	QL_DPRINT12(ha, "exit\n");
 	return;
@@ -2346,9 +2329,6 @@ qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
 	struct qlnxr_create_srq_ureq *ureq,
 	int access, int dmasync)
 {
-#ifdef DEFINE_IB_UMEM_WITH_CHUNK
-	struct ib_umem_chunk *chunk;
-#endif
 	struct scatterlist *sg;
 	int rc;
 	struct qlnxr_dev *dev = srq->dev;
@@ -2376,13 +2356,7 @@ qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
 		return PTR_ERR(srq->prod_umem);
 	}
 
-#ifdef DEFINE_IB_UMEM_WITH_CHUNK
-	chunk = container_of((&srq->prod_umem->chunk_list)->next,
-	    typeof(*chunk), list);
-	sg = &chunk->page_list[0];
-#else
 	sg = srq->prod_umem->sg_head.sgl;
-#endif
 	srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
 
 	QL_DPRINT12(ha, "exit\n");
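For reference, the surviving branch of qlnxr_populate_pbls() walks each
DMA segment with for_each_sg(), splits it into page-sized pieces, and
stores each page address as a lo/hi 32-bit pair. A rough userspace
analogue of that loop (struct seg and struct pbe are hypothetical
stand-ins; the real code also byte-swaps with cpu_to_le32() and chains
onto the next PBL table when one fills up):

#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t dma_addr; uint64_t len; };  /* scatterlist entry stand-in */
struct pbe { uint32_t lo; uint32_t hi; };         /* struct regpair stand-in */

static void
populate_pbes(const struct seg *segs, int nseg, uint64_t page_size,
    struct pbe *pbe, int num_pbes)
{
	int total = 0;

	for (int s = 0; s < nseg; s++) {
		/* pages per segment; the driver computes len >> ilog2(page_size) */
		uint64_t pages = segs[s].len / page_size;

		for (uint64_t pg = 0; pg < pages; pg++) {
			uint64_t addr = segs[s].dma_addr + page_size * pg;

			pbe->lo = (uint32_t)addr;         /* low 32 bits */
			pbe->hi = (uint32_t)(addr >> 32); /* upper_32_bits() */
			pbe++;

			if (++total == num_pbes)          /* table full, stop */
				return;
		}
	}
}

int main(void)
{
	struct seg segs[] = { { 0x100000000ULL, 8192 }, { 0x200000ULL, 4096 } };
	struct pbe pbes[3] = { { 0, 0 } };

	populate_pbes(segs, 2, 4096, pbes, 3);
	for (int i = 0; i < 3; i++)
		printf("pbe[%d] = 0x%08x:%08x\n", i,
		    (unsigned)pbes[i].hi, (unsigned)pbes[i].lo);
	return 0;
}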