MFC r256694, r256713, r256714.
r256694: iw_cxgbe: iWARP driver for Chelsio T4/T5 chips.  This is a
straight port of the iw_cxgb4 found in OFED distributions.
r256713: iw_cxgbe should have a dependency on t4nex.
r256714: Fix typo in previous commit.

Approved by:	re (hrs)
commit 01cd6364a8 (parent 514a2c2eaa)
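
As a quick usage sketch (assuming a kernel built with TCP_OFFLOAD and a
cxgbe(4) adapter present), the module can be loaded with:

    # kldload iw_cxgbe

The MODULE_DEPEND declarations in device.c (t4nex, t4_tom, ibcore) let the
kernel linker pull in the prerequisites automatically.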

sys/dev/cxgbe/iw_cxgbe/cm.c (new file, 2458 lines; diff too large to display)

sys/dev/cxgbe/iw_cxgbe/cq.c (new file, 926 lines)
@@ -0,0 +1,926 @@
/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>

#include "iw_cxgbe.h"
#include "user.h"

static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
    struct c4iw_dev_ucontext *uctx)
{
    struct adapter *sc = rdev->adap;
    struct fw_ri_res_wr *res_wr;
    struct fw_ri_res *res;
    int wr_len;
    struct c4iw_wr_wait wr_wait;
    struct wrqe *wr;

    wr_len = sizeof *res_wr + sizeof *res;
    wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
    if (wr == NULL)
        return (0);
    res_wr = wrtod(wr);
    memset(res_wr, 0, wr_len);
    res_wr->op_nres = cpu_to_be32(
        V_FW_WR_OP(FW_RI_RES_WR) |
        V_FW_RI_RES_WR_NRES(1) |
        F_FW_WR_COMPL);
    res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
    res_wr->cookie = (unsigned long)&wr_wait;
    res = res_wr->res;
    res->u.cq.restype = FW_RI_RES_TYPE_CQ;
    res->u.cq.op = FW_RI_RES_OP_RESET;
    res->u.cq.iqid = cpu_to_be32(cq->cqid);

    c4iw_init_wr_wait(&wr_wait);

    t4_wrq_tx(sc, wr);

    c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

    kfree(cq->sw_queue);
    contigfree(cq->queue, cq->memsize, M_DEVBUF);
    c4iw_put_cqid(rdev, cq->cqid, uctx);
    return 0;
}

static int
create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
    struct c4iw_dev_ucontext *uctx)
{
    struct adapter *sc = rdev->adap;
    struct fw_ri_res_wr *res_wr;
    struct fw_ri_res *res;
    int wr_len;
    int user = (uctx != &rdev->uctx);
    struct c4iw_wr_wait wr_wait;
    int ret;
    struct wrqe *wr;

    cq->cqid = c4iw_get_cqid(rdev, uctx);
    if (!cq->cqid) {
        ret = -ENOMEM;
        goto err1;
    }

    if (!user) {
        cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
        if (!cq->sw_queue) {
            ret = -ENOMEM;
            goto err2;
        }
    }

    cq->queue = contigmalloc(cq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul,
        PAGE_SIZE, 0);
    if (cq->queue)
        cq->dma_addr = vtophys(cq->queue);
    else {
        ret = -ENOMEM;
        goto err3;
    }

    pci_unmap_addr_set(cq, mapping, cq->dma_addr);
    memset(cq->queue, 0, cq->memsize);

    /* build fw_ri_res_wr */
    wr_len = sizeof *res_wr + sizeof *res;

    wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
    if (wr == NULL)
        return (0);
    res_wr = wrtod(wr);

    memset(res_wr, 0, wr_len);
    res_wr->op_nres = cpu_to_be32(
        V_FW_WR_OP(FW_RI_RES_WR) |
        V_FW_RI_RES_WR_NRES(1) |
        F_FW_WR_COMPL);
    res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
    res_wr->cookie = (unsigned long)&wr_wait;
    res = res_wr->res;
    res->u.cq.restype = FW_RI_RES_TYPE_CQ;
    res->u.cq.op = FW_RI_RES_OP_WRITE;
    res->u.cq.iqid = cpu_to_be32(cq->cqid);
    /* Fixme: Always use first queue id for IQANDSTINDEX. Linux does the same. */
    res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
        V_FW_RI_RES_WR_IQANUS(0) |
        V_FW_RI_RES_WR_IQANUD(1) |
        F_FW_RI_RES_WR_IQANDST |
        V_FW_RI_RES_WR_IQANDSTINDEX(sc->sge.ofld_rxq[0].iq.abs_id));
    res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
        F_FW_RI_RES_WR_IQDROPRSS |
        V_FW_RI_RES_WR_IQPCIECH(2) |
        V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
        F_FW_RI_RES_WR_IQO |
        V_FW_RI_RES_WR_IQESIZE(1));
    res->u.cq.iqsize = cpu_to_be16(cq->size);
    res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

    c4iw_init_wr_wait(&wr_wait);

    t4_wrq_tx(sc, wr);

    CTR2(KTR_IW_CXGBE, "%s wait_event wr_wait %p", __func__, &wr_wait);
    ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
    if (ret)
        goto err4;

    cq->gen = 1;
    cq->gts = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
        MYPF_REG(SGE_PF_GTS));
    cq->rdev = rdev;

    if (user) {
        cq->ugts = (u64)((char *)rman_get_virtual(sc->udbs_res) +
            (cq->cqid << rdev->cqshift));
        cq->ugts &= PAGE_MASK;
        CTR5(KTR_IW_CXGBE,
            "%s: UGTS %p cqid %x cqshift %d page_mask %x", __func__,
            cq->ugts, cq->cqid, rdev->cqshift, PAGE_MASK);
    }
    return 0;
err4:
    contigfree(cq->queue, cq->memsize, M_DEVBUF);
err3:
    kfree(cq->sw_queue);
err2:
    c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
    return ret;
}

static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
    struct t4_cqe cqe;

    CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
        cq, cq->sw_cidx, cq->sw_pidx);
    memset(&cqe, 0, sizeof(cqe));
    cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
        V_CQE_OPCODE(FW_RI_SEND) |
        V_CQE_TYPE(0) |
        V_CQE_SWCQE(1) |
        V_CQE_QPID(wq->sq.qid));
    cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
    cq->sw_queue[cq->sw_pidx] = cqe;
    t4_swcq_produce(cq);
}
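
/*
 * Synthesize a T4_ERR_SWFLUSH completion in the software CQ for each RQ
 * WR still outstanding beyond the 'count' already-polled entries, so
 * consumers see a flush status for work that will never complete in
 * hardware.
 */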
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
    int flushed = 0;
    int in_use = wq->rq.in_use - count;

    BUG_ON(in_use < 0);
    CTR5(KTR_IW_CXGBE, "%s wq %p cq %p rq.in_use %u skip count %u",
        __func__, wq, cq, wq->rq.in_use, count);
    while (in_use--) {
        insert_recv_cqe(wq, cq);
        flushed++;
    }
    return flushed;
}

static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
    struct t4_swsqe *swcqe)
{
    struct t4_cqe cqe;

    CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
        cq, cq->sw_cidx, cq->sw_pidx);
    memset(&cqe, 0, sizeof(cqe));
    cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
        V_CQE_OPCODE(swcqe->opcode) |
        V_CQE_TYPE(1) |
        V_CQE_SWCQE(1) |
        V_CQE_QPID(wq->sq.qid));
    CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
    cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
    cq->sw_queue[cq->sw_pidx] = cqe;
    t4_swcq_produce(cq);
}

int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
    int flushed = 0;
    struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
    int in_use = wq->sq.in_use - count;

    BUG_ON(in_use < 0);
    while (in_use--) {
        swsqe->signaled = 0;
        insert_sq_cqe(wq, cq, swsqe);
        swsqe++;
        if (swsqe == (wq->sq.sw_sq + wq->sq.size))
            swsqe = wq->sq.sw_sq;
        flushed++;
    }
    return flushed;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void c4iw_flush_hw_cq(struct t4_cq *cq)
{
    struct t4_cqe *cqe = NULL, *swcqe;
    int ret;

    CTR3(KTR_IW_CXGBE, "%s cq %p cqid 0x%x", __func__, cq, cq->cqid);
    ret = t4_next_hw_cqe(cq, &cqe);
    while (!ret) {
        CTR3(KTR_IW_CXGBE, "%s flushing hwcq cidx 0x%x swcq pidx 0x%x",
            __func__, cq->cidx, cq->sw_pidx);
        swcqe = &cq->sw_queue[cq->sw_pidx];
        *swcqe = *cqe;
        swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
        t4_swcq_produce(cq);
        t4_hwcq_consume(cq);
        ret = t4_next_hw_cqe(cq, &cqe);
    }
}

static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
    if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
        return 0;

    if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
        return 0;

    if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
        return 0;

    if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
        return 0;
    return 1;
}

void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
    struct t4_cqe *cqe;
    u32 ptr;

    *count = 0;
    ptr = cq->sw_cidx;
    while (ptr != cq->sw_pidx) {
        cqe = &cq->sw_queue[ptr];
        if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
            wq->sq.oldest_read)) &&
            (CQE_QPID(cqe) == wq->sq.qid))
            (*count)++;
        if (++ptr == cq->size)
            ptr = 0;
    }
    CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
}

void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
    struct t4_cqe *cqe;
    u32 ptr;

    *count = 0;
    CTR2(KTR_IW_CXGBE, "%s count zero %d", __func__, *count);
    ptr = cq->sw_cidx;
    while (ptr != cq->sw_pidx) {
        cqe = &cq->sw_queue[ptr];
        if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
            (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
            (*count)++;
        if (++ptr == cq->size)
            ptr = 0;
    }
    CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
}

static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
    struct t4_swsqe *swsqe;
    u16 ptr = wq->sq.cidx;
    int count = wq->sq.in_use;
    int unsignaled = 0;

    swsqe = &wq->sq.sw_sq[ptr];
    while (count--)
        if (!swsqe->signaled) {
            if (++ptr == wq->sq.size)
                ptr = 0;
            swsqe = &wq->sq.sw_sq[ptr];
            unsignaled++;
        } else if (swsqe->complete) {

            /*
             * Insert this completed cqe into the swcq.
             */
            CTR3(KTR_IW_CXGBE,
                "%s moving cqe into swcq sq idx %u cq idx %u",
                __func__, ptr, cq->sw_pidx);
            swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
            cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
            t4_swcq_produce(cq);
            swsqe->signaled = 0;
            wq->sq.in_use -= unsignaled;
            break;
        } else
            break;
}
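
/*
 * Construct, in read_cqe, a synthetic SQ completion for the oldest
 * outstanding FW_RI_READ_REQ.  The hardware reports read responses on
 * the RQ side of the CQ, so the completion has to be rewritten before
 * it can be reported against the original read WR (see poll_cq()).
 */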
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
    struct t4_cqe *read_cqe)
{
    read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
    read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
    read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
        V_CQE_SWCQE(SW_CQE(hw_cqe)) |
        V_CQE_OPCODE(FW_RI_READ_REQ) |
        V_CQE_TYPE(1));
    read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

/*
 * Advance wq->sq.oldest_read to the next read wr in the SWSQ, or NULL.
 */
static void advance_oldest_read(struct t4_wq *wq)
{

    u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

    if (rptr == wq->sq.size)
        rptr = 0;
    while (rptr != wq->sq.pidx) {
        wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

        if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
            return;
        if (++rptr == wq->sq.size)
            rptr = 0;
    }
    wq->sq.oldest_read = NULL;
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0            CQE returned ok.
 *     -EAGAIN      CQE skipped, try again.
 *     -EOVERFLOW   CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
    u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
    int ret = 0;
    struct t4_cqe *hw_cqe, read_cqe;

    *cqe_flushed = 0;
    *credit = 0;
    ret = t4_next_cqe(cq, &hw_cqe);
    if (ret)
        return ret;

    CTR6(KTR_IW_CXGBE,
        "%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x", __func__,
        CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe), CQE_GENBIT(hw_cqe),
        CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe));
    CTR5(KTR_IW_CXGBE,
        "%s opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x",
        __func__, CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
        CQE_WRID_LOW(hw_cqe));

    /*
     * Skip CQEs not affiliated with a QP.
     */
    if (wq == NULL) {
        ret = -EAGAIN;
        goto skip_cqe;
    }

    /*
     * Gotta tweak READ completions:
     *     1) the cqe doesn't contain the sq_wptr from the wr.
     *     2) opcode not reflected from the wr.
     *     3) read_len not reflected from the wr.
     *     4) cq_type is RQ_TYPE not SQ_TYPE.
     */
    if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

        /*
         * If this is an unsolicited read response, then the read
         * was generated by the kernel driver as part of peer-2-peer
         * connection setup. So ignore the completion.
         */
        if (!wq->sq.oldest_read) {
            if (CQE_STATUS(hw_cqe))
                t4_set_wq_in_error(wq);
            ret = -EAGAIN;
            goto skip_cqe;
        }

        /*
         * Don't write to the HWCQ, so create a new read req CQE
         * in local memory.
         */
        create_read_req_cqe(wq, hw_cqe, &read_cqe);
        hw_cqe = &read_cqe;
        advance_oldest_read(wq);
    }

    if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
        *cqe_flushed = t4_wq_in_error(wq);
        t4_set_wq_in_error(wq);
        goto proc_cqe;
    }

    if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
        ret = -EAGAIN;
        goto skip_cqe;
    }

    /*
     * RECV completion.
     */
    if (RQ_TYPE(hw_cqe)) {

        /*
         * HW only validates 4 bits of MSN. So we must validate that
         * the MSN in the SEND is the next expected MSN. If it's not,
         * then we complete this with T4_ERR_MSN and mark the wq in
         * error.
         */

        if (t4_rq_empty(wq)) {
            t4_set_wq_in_error(wq);
            ret = -EAGAIN;
            goto skip_cqe;
        }
        if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
            t4_set_wq_in_error(wq);
            hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
            goto proc_cqe;
        }
        goto proc_cqe;
    }

    /*
     * If we get here it's a send completion.
     *
     * Handle out of order completion. These get stuffed
     * in the SW SQ. Then the SW SQ is walked to move any
     * now in-order completions into the SW CQ. This handles
     * 2 cases:
     *     1) reaping unsignaled WRs when the first subsequent
     *        signaled WR is completed.
     *     2) out of order read completions.
     */
    if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
        struct t4_swsqe *swsqe;

        CTR2(KTR_IW_CXGBE,
            "%s out of order completion going in sw_sq at idx %u",
            __func__, CQE_WRID_SQ_IDX(hw_cqe));
        swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
        swsqe->cqe = *hw_cqe;
        swsqe->complete = 1;
        ret = -EAGAIN;
        goto flush_wq;
    }

proc_cqe:
    *cqe = *hw_cqe;

    /*
     * Reap the associated WR(s) that are freed up with this
     * completion.
     */
    if (SQ_TYPE(hw_cqe)) {
        wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
        CTR2(KTR_IW_CXGBE, "%s completing sq idx %u",
            __func__, wq->sq.cidx);
        *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
        t4_sq_consume(wq);
    } else {
        CTR2(KTR_IW_CXGBE, "%s completing rq idx %u",
            __func__, wq->rq.cidx);
        *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
        BUG_ON(t4_rq_empty(wq));
        t4_rq_consume(wq);
    }

flush_wq:
    /*
     * Flush any completed cqes that are now in-order.
     */
    flush_completed_wrs(wq, cq);

skip_cqe:
    if (SW_CQE(hw_cqe)) {
        CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip sw cqe cidx %u",
            __func__, cq, cq->cqid, cq->sw_cidx);
        t4_swcq_consume(cq);
    } else {
        CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip hw cqe cidx %u",
            __func__, cq, cq->cqid, cq->cidx);
        t4_hwcq_consume(cq);
    }
    return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *     0                   cqe returned
 *     -ENODATA            EMPTY
 *     -EAGAIN             caller must try again
 *     any other -errno    fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
    struct c4iw_qp *qhp = NULL;
    struct t4_cqe cqe = {0, 0}, *rd_cqe;
    struct t4_wq *wq;
    u32 credit = 0;
    u8 cqe_flushed;
    u64 cookie = 0;
    int ret;

    ret = t4_next_cqe(&chp->cq, &rd_cqe);

    if (ret)
        return ret;

    qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
    if (!qhp)
        wq = NULL;
    else {
        spin_lock(&qhp->lock);
        wq = &(qhp->wq);
    }
    ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
    if (ret)
        goto out;

    wc->wr_id = cookie;
    wc->qp = &qhp->ibqp;
    wc->vendor_err = CQE_STATUS(&cqe);
    wc->wc_flags = 0;

    CTR5(KTR_IW_CXGBE, "%s qpid 0x%x type %d opcode %d status 0x%x",
        __func__, CQE_QPID(&cqe), CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
        CQE_STATUS(&cqe));
    CTR5(KTR_IW_CXGBE, "%s len %u wrid hi 0x%x lo 0x%x cookie 0x%llx",
        __func__, CQE_LEN(&cqe), CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
        (unsigned long long)cookie);

    if (CQE_TYPE(&cqe) == 0) {
        if (!CQE_STATUS(&cqe))
            wc->byte_len = CQE_LEN(&cqe);
        else
            wc->byte_len = 0;
        wc->opcode = IB_WC_RECV;
        if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
            CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
            wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
            wc->wc_flags |= IB_WC_WITH_INVALIDATE;
        }
    } else {
        switch (CQE_OPCODE(&cqe)) {
        case FW_RI_RDMA_WRITE:
            wc->opcode = IB_WC_RDMA_WRITE;
            break;
        case FW_RI_READ_REQ:
            wc->opcode = IB_WC_RDMA_READ;
            wc->byte_len = CQE_LEN(&cqe);
            break;
        case FW_RI_SEND_WITH_INV:
        case FW_RI_SEND_WITH_SE_INV:
            wc->opcode = IB_WC_SEND;
            wc->wc_flags |= IB_WC_WITH_INVALIDATE;
            break;
        case FW_RI_SEND:
        case FW_RI_SEND_WITH_SE:
            wc->opcode = IB_WC_SEND;
            break;
        case FW_RI_BIND_MW:
            wc->opcode = IB_WC_BIND_MW;
            break;

        case FW_RI_LOCAL_INV:
            wc->opcode = IB_WC_LOCAL_INV;
            break;
        case FW_RI_FAST_REGISTER:
            wc->opcode = IB_WC_FAST_REG_MR;
            break;
        default:
            printf("Unexpected opcode %d "
                "in the CQE received for QPID = 0x%0x\n",
                CQE_OPCODE(&cqe), CQE_QPID(&cqe));
            ret = -EINVAL;
            goto out;
        }
    }

    if (cqe_flushed)
        wc->status = IB_WC_WR_FLUSH_ERR;
    else {

        switch (CQE_STATUS(&cqe)) {
        case T4_ERR_SUCCESS:
            wc->status = IB_WC_SUCCESS;
            break;
        case T4_ERR_STAG:
            wc->status = IB_WC_LOC_ACCESS_ERR;
            break;
        case T4_ERR_PDID:
            wc->status = IB_WC_LOC_PROT_ERR;
            break;
        case T4_ERR_QPID:
        case T4_ERR_ACCESS:
            wc->status = IB_WC_LOC_ACCESS_ERR;
            break;
        case T4_ERR_WRAP:
            wc->status = IB_WC_GENERAL_ERR;
            break;
        case T4_ERR_BOUND:
            wc->status = IB_WC_LOC_LEN_ERR;
            break;
        case T4_ERR_INVALIDATE_SHARED_MR:
        case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
            wc->status = IB_WC_MW_BIND_ERR;
            break;
        case T4_ERR_CRC:
        case T4_ERR_MARKER:
        case T4_ERR_PDU_LEN_ERR:
        case T4_ERR_OUT_OF_RQE:
        case T4_ERR_DDP_VERSION:
        case T4_ERR_RDMA_VERSION:
        case T4_ERR_DDP_QUEUE_NUM:
        case T4_ERR_MSN:
        case T4_ERR_TBIT:
        case T4_ERR_MO:
        case T4_ERR_MSN_RANGE:
        case T4_ERR_IRD_OVERFLOW:
        case T4_ERR_OPCODE:
        case T4_ERR_INTERNAL_ERR:
            wc->status = IB_WC_FATAL_ERR;
            break;
        case T4_ERR_SWFLUSH:
            wc->status = IB_WC_WR_FLUSH_ERR;
            break;
        default:
            printf("Unexpected cqe_status 0x%x for QPID = 0x%0x\n",
                CQE_STATUS(&cqe), CQE_QPID(&cqe));
            ret = -EINVAL;
        }
    }
out:
    if (wq)
        spin_unlock(&qhp->lock);
    return ret;
}

int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
    struct c4iw_cq *chp;
    unsigned long flags;
    int npolled;
    int err = 0;

    chp = to_c4iw_cq(ibcq);

    spin_lock_irqsave(&chp->lock, flags);
    for (npolled = 0; npolled < num_entries; ++npolled) {
        do {
            err = c4iw_poll_cq_one(chp, wc + npolled);
        } while (err == -EAGAIN);
        if (err)
            break;
    }
    spin_unlock_irqrestore(&chp->lock, flags);
    return !err || err == -ENODATA ? npolled : err;
}
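
/*
 * c4iw_poll_cq() is not called directly by consumers; it is installed
 * as the ib_device poll_cq method and reached through ib_poll_cq().
 * A kernel consumer typically drains completions roughly as in the
 * sketch below (handle_wc() is a placeholder for the consumer's
 * completion handler):
 *
 *     struct ib_wc wc[4];
 *     int i, n;
 *
 *     while ((n = ib_poll_cq(ibcq, 4, wc)) > 0)
 *         for (i = 0; i < n; i++)
 *             handle_wc(&wc[i]);
 */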

int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
    struct c4iw_cq *chp;
    struct c4iw_ucontext *ucontext;

    CTR2(KTR_IW_CXGBE, "%s ib_cq %p", __func__, ib_cq);
    chp = to_c4iw_cq(ib_cq);

    remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
    atomic_dec(&chp->refcnt);
    wait_event(chp->wait, !atomic_read(&chp->refcnt));

    ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
        : NULL;
    destroy_cq(&chp->rhp->rdev, &chp->cq,
        ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
    kfree(chp);
    return 0;
}

struct ib_cq *
c4iw_create_cq(struct ib_device *ibdev, int entries, int vector,
    struct ib_ucontext *ib_context, struct ib_udata *udata)
{
    struct c4iw_dev *rhp;
    struct c4iw_cq *chp;
    struct c4iw_create_cq_resp uresp;
    struct c4iw_ucontext *ucontext = NULL;
    int ret;
    size_t memsize, hwentries;
    struct c4iw_mm_entry *mm, *mm2;

    CTR3(KTR_IW_CXGBE, "%s ib_dev %p entries %d", __func__, ibdev, entries);

    rhp = to_c4iw_dev(ibdev);

    chp = kzalloc(sizeof(*chp), GFP_KERNEL);
    if (!chp)
        return ERR_PTR(-ENOMEM);

    if (ib_context)
        ucontext = to_c4iw_ucontext(ib_context);

    /* account for the status page. */
    entries++;

    /* IQ needs one extra entry to differentiate full vs empty. */
    entries++;

    /*
     * entries must be multiple of 16 for HW.
     */
    entries = roundup(entries, 16);

    /*
     * Make actual HW queue 2x to avoid cidx_inc overflows.
     */
    hwentries = entries * 2;

    /*
     * Make HW queue at least 64 entries so GTS updates aren't too
     * frequent.
     */
    if (hwentries < 64)
        hwentries = 64;

    memsize = hwentries * sizeof *chp->cq.queue;

    /*
     * memsize must be a multiple of the page size if it's a user cq.
     */
    if (ucontext) {
        memsize = roundup(memsize, PAGE_SIZE);
        hwentries = memsize / sizeof *chp->cq.queue;
        while (hwentries > T4_MAX_IQ_SIZE) {
            memsize -= PAGE_SIZE;
            hwentries = memsize / sizeof *chp->cq.queue;
        }
    }
    chp->cq.size = hwentries;
    chp->cq.memsize = memsize;

    ret = create_cq(&rhp->rdev, &chp->cq,
        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
    if (ret)
        goto err1;

    chp->rhp = rhp;
    chp->cq.size--;    /* status page */
    chp->ibcq.cqe = entries - 2;
    spin_lock_init(&chp->lock);
    spin_lock_init(&chp->comp_handler_lock);
    atomic_set(&chp->refcnt, 1);
    init_waitqueue_head(&chp->wait);
    ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
    if (ret)
        goto err2;

    if (ucontext) {
        mm = kmalloc(sizeof *mm, GFP_KERNEL);
        if (!mm)
            goto err3;
        mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
        if (!mm2)
            goto err4;

        uresp.qid_mask = rhp->rdev.cqmask;
        uresp.cqid = chp->cq.cqid;
        uresp.size = chp->cq.size;
        uresp.memsize = chp->cq.memsize;
        spin_lock(&ucontext->mmap_lock);
        uresp.key = ucontext->key;
        ucontext->key += PAGE_SIZE;
        uresp.gts_key = ucontext->key;
        ucontext->key += PAGE_SIZE;
        spin_unlock(&ucontext->mmap_lock);
        ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
        if (ret)
            goto err5;

        mm->key = uresp.key;
        mm->addr = vtophys(chp->cq.queue);
        mm->len = chp->cq.memsize;
        insert_mmap(ucontext, mm);

        mm2->key = uresp.gts_key;
        mm2->addr = chp->cq.ugts;
        mm2->len = PAGE_SIZE;
        insert_mmap(ucontext, mm2);
    }
    CTR6(KTR_IW_CXGBE,
        "%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx",
        __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
        (unsigned long long)chp->cq.dma_addr);
    return &chp->ibcq;
err5:
    kfree(mm2);
err4:
    kfree(mm);
err3:
    remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
    destroy_cq(&chp->rhp->rdev, &chp->cq,
        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
    kfree(chp);
    return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
    return -ENOSYS;
}

int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
    struct c4iw_cq *chp;
    int ret;
    unsigned long flag;

    chp = to_c4iw_cq(ibcq);
    spin_lock_irqsave(&chp->lock, flag);
    ret = t4_arm_cq(&chp->cq,
        (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
    spin_unlock_irqrestore(&chp->lock, flag);
    if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
        ret = 0;
    return ret;
}
#endif

sys/dev/cxgbe/iw_cxgbe/device.c (new file, 369 lines)
@@ -0,0 +1,369 @@
/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/ktr.h>

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <rdma/ib_verbs.h>
#include <linux/idr.h>

#ifdef TCP_OFFLOAD
#include "iw_cxgbe.h"

int spg_creds = 2;    /* Default status page size is 2 credits = 128B */

void
c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
    struct c4iw_dev_ucontext *uctx)
{
    struct list_head *pos, *nxt;
    struct c4iw_qid_list *entry;

    mutex_lock(&uctx->lock);
    list_for_each_safe(pos, nxt, &uctx->qpids) {
        entry = list_entry(pos, struct c4iw_qid_list, entry);
        list_del_init(&entry->entry);
        if (!(entry->qid & rdev->qpmask)) {
            c4iw_put_resource(&rdev->resource.qid_table,
                entry->qid);
            mutex_lock(&rdev->stats.lock);
            rdev->stats.qid.cur -= rdev->qpmask + 1;
            mutex_unlock(&rdev->stats.lock);
        }
        kfree(entry);
    }

    list_for_each_safe(pos, nxt, &uctx->cqids) {
        entry = list_entry(pos, struct c4iw_qid_list, entry);
        list_del_init(&entry->entry);
        kfree(entry);
    }
    mutex_unlock(&uctx->lock);
}

void
c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{

    INIT_LIST_HEAD(&uctx->qpids);
    INIT_LIST_HEAD(&uctx->cqids);
    mutex_init(&uctx->lock);
}
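
/*
 * qpshift/cqshift convert a queue id into the byte offset of its
 * doorbell within the user-mappable udbs region: with 2^s_qpp queues
 * sharing a page, each queue owns PAGE_SIZE >> s_qpp bytes, so the
 * offset is qid << (PAGE_SHIFT - s_qpp).  See c4iw_rdev_open() below.
 */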
static int
c4iw_rdev_open(struct c4iw_rdev *rdev)
{
    struct adapter *sc = rdev->adap;
    int rc;

    c4iw_init_dev_ucontext(rdev, &rdev->uctx);

    /* Save the status page size set by if_cxgbe */
    spg_creds = (t4_read_reg(sc, A_SGE_CONTROL) & F_EGRSTATUSPAGESIZE) ?
        2 : 1;

    /* XXX: we can probably make this work */
    if (sc->sge.eq_s_qpp > PAGE_SHIFT || sc->sge.iq_s_qpp > PAGE_SHIFT) {
        device_printf(sc->dev,
            "doorbell density too high (eq %d, iq %d, pg %d).\n",
            sc->sge.eq_s_qpp, sc->sge.iq_s_qpp, PAGE_SHIFT);
        rc = -EINVAL;
        goto err1;
    }

    rdev->qpshift = PAGE_SHIFT - sc->sge.eq_s_qpp;
    rdev->qpmask = (1 << sc->sge.eq_s_qpp) - 1;
    rdev->cqshift = PAGE_SHIFT - sc->sge.iq_s_qpp;
    rdev->cqmask = (1 << sc->sge.iq_s_qpp) - 1;

    if (c4iw_num_stags(rdev) == 0) {
        rc = -EINVAL;
        goto err1;
    }

    rdev->stats.pd.total = T4_MAX_NUM_PD;
    rdev->stats.stag.total = sc->vres.stag.size;
    rdev->stats.pbl.total = sc->vres.pbl.size;
    rdev->stats.rqt.total = sc->vres.rq.size;
    rdev->stats.qid.total = sc->vres.qp.size;

    rc = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
    if (rc) {
        device_printf(sc->dev, "error %d initializing resources\n", rc);
        goto err1;
    }
    rc = c4iw_pblpool_create(rdev);
    if (rc) {
        device_printf(sc->dev, "error %d initializing pbl pool\n", rc);
        goto err2;
    }
    rc = c4iw_rqtpool_create(rdev);
    if (rc) {
        device_printf(sc->dev, "error %d initializing rqt pool\n", rc);
        goto err3;
    }

    return (0);
err3:
    c4iw_pblpool_destroy(rdev);
err2:
    c4iw_destroy_resource(&rdev->resource);
err1:
    return (rc);
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
    c4iw_pblpool_destroy(rdev);
    c4iw_rqtpool_destroy(rdev);
    c4iw_destroy_resource(&rdev->resource);
}

static void
c4iw_dealloc(struct c4iw_dev *iwsc)
{

    c4iw_rdev_close(&iwsc->rdev);
    idr_destroy(&iwsc->cqidr);
    idr_destroy(&iwsc->qpidr);
    idr_destroy(&iwsc->mmidr);
    ib_dealloc_device(&iwsc->ibdev);
}

static struct c4iw_dev *
c4iw_alloc(struct adapter *sc)
{
    struct c4iw_dev *iwsc;
    int rc;

    iwsc = (struct c4iw_dev *)ib_alloc_device(sizeof(*iwsc));
    if (iwsc == NULL) {
        device_printf(sc->dev, "Cannot allocate ib device.\n");
        return (ERR_PTR(-ENOMEM));
    }
    iwsc->rdev.adap = sc;

    rc = c4iw_rdev_open(&iwsc->rdev);
    if (rc != 0) {
        device_printf(sc->dev, "Unable to open CXIO rdev (%d)\n", rc);
        ib_dealloc_device(&iwsc->ibdev);
        return (ERR_PTR(rc));
    }

    idr_init(&iwsc->cqidr);
    idr_init(&iwsc->qpidr);
    idr_init(&iwsc->mmidr);
    spin_lock_init(&iwsc->lock);
    mutex_init(&iwsc->rdev.stats.lock);

    return (iwsc);
}

static int c4iw_mod_load(void);
static int c4iw_mod_unload(void);
static int c4iw_activate(struct adapter *);
static int c4iw_deactivate(struct adapter *);

static struct uld_info c4iw_uld_info = {
    .uld_id = ULD_IWARP,
    .activate = c4iw_activate,
    .deactivate = c4iw_deactivate,
};

static int
c4iw_activate(struct adapter *sc)
{
    struct c4iw_dev *iwsc;
    int rc;

    ASSERT_SYNCHRONIZED_OP(sc);

    if (isset(&sc->offload_map, MAX_NPORTS)) {
        KASSERT(0, ("%s: RDMA already enabled on sc %p", __func__, sc));
        return (0);
    }

    if (sc->rdmacaps == 0) {
        device_printf(sc->dev,
            "RDMA not supported or RDMA cap is not enabled.\n");
        return (ENOSYS);
    }

    iwsc = c4iw_alloc(sc);
    if (IS_ERR(iwsc)) {
        rc = -PTR_ERR(iwsc);
        device_printf(sc->dev, "initialization failed: %d\n", rc);
        return (rc);
    }

    sc->iwarp_softc = iwsc;
    c4iw_cm_init_cpl(sc);

    rc = -c4iw_register_device(iwsc);
    if (rc) {
        device_printf(sc->dev, "RDMA registration failed: %d\n", rc);
        c4iw_dealloc(iwsc);
        sc->iwarp_softc = NULL;
    }

    return (rc);
}

static int
c4iw_deactivate(struct adapter *sc)
{
    struct c4iw_dev *iwsc = sc->iwarp_softc;

    ASSERT_SYNCHRONIZED_OP(sc);

    c4iw_unregister_device(iwsc);
    c4iw_dealloc(iwsc);
    sc->iwarp_softc = NULL;

    return (0);
}

static void
c4iw_activate_all(struct adapter *sc, void *arg __unused)
{

    if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwact") != 0)
        return;

    if (!isset(&sc->offload_map, MAX_NPORTS) &&
        t4_activate_uld(sc, ULD_IWARP) == 0)
        setbit(&sc->offload_map, MAX_NPORTS);

    end_synchronized_op(sc, 0);
}

static void
c4iw_deactivate_all(struct adapter *sc, void *arg __unused)
{

    if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwdea") != 0)
        return;

    if (isset(&sc->offload_map, MAX_NPORTS) &&
        t4_deactivate_uld(sc, ULD_IWARP) == 0)
        clrbit(&sc->offload_map, MAX_NPORTS);

    end_synchronized_op(sc, 0);
}

static int
c4iw_mod_load(void)
{
    int rc;

    rc = -c4iw_cm_init();
    if (rc != 0)
        return (rc);

    rc = t4_register_uld(&c4iw_uld_info);
    if (rc != 0) {
        c4iw_cm_term();
        return (rc);
    }

    t4_iterate(c4iw_activate_all, NULL);

    return (rc);
}

static int
c4iw_mod_unload(void)
{

    t4_iterate(c4iw_deactivate_all, NULL);

    c4iw_cm_term();

    if (t4_unregister_uld(&c4iw_uld_info) == EBUSY)
        return (EBUSY);

    return (0);
}

#endif
#undef MODULE_VERSION
#include <sys/module.h>

/*
 * t4_tom won't load on kernels without TCP_OFFLOAD and this module's dependency
 * on t4_tom ensures that it won't either. So we don't directly check for
 * TCP_OFFLOAD here.
 */
static int
c4iw_modevent(module_t mod, int cmd, void *arg)
{
    int rc = 0;

#ifdef TCP_OFFLOAD
    switch (cmd) {
    case MOD_LOAD:
        rc = c4iw_mod_load();
        if (rc == 0)
            printf("iw_cxgbe: Chelsio T4/T5 RDMA driver loaded.\n");
        break;

    case MOD_UNLOAD:
        rc = c4iw_mod_unload();
        break;

    default:
        rc = EINVAL;
    }
#else
    printf("iw_cxgbe: compiled without TCP_OFFLOAD support.\n");
    rc = EOPNOTSUPP;
#endif
    return (rc);
}

static moduledata_t c4iw_mod_data = {
    "iw_cxgbe",
    c4iw_modevent,
    0
};
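
/*
 * Module metadata.  The dependency on t4nex (the base cxgbe(4) nexus,
 * added in r256713 with a typo fixed in r256714) and on t4_tom ensures
 * the offload infrastructure is loaded before this module initializes.
 */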
MODULE_VERSION(iw_cxgbe, 1);
MODULE_DEPEND(iw_cxgbe, t4nex, 1, 1, 1);
MODULE_DEPEND(iw_cxgbe, t4_tom, 1, 1, 1);
MODULE_DEPEND(iw_cxgbe, ibcore, 1, 1, 1);
DECLARE_MODULE(iw_cxgbe, c4iw_mod_data, SI_SUB_EXEC, SI_ORDER_ANY);

sys/dev/cxgbe/iw_cxgbe/ev.c (new file, 206 lines)
@@ -0,0 +1,206 @@
/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/slab.h>

#include "iw_cxgbe.h"

static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
    struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
    enum ib_event_type ib_event)
{
    struct ib_event event;
    struct c4iw_qp_attributes attrs;
    unsigned long flag;

    if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
        (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
        CTR4(KTR_IW_CXGBE, "%s AE received after RTS - "
            "qp state %d qpid 0x%x status 0x%x", __func__,
            qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
        return;
    }

    printf("AE qpid 0x%x opcode %d status 0x%x "
        "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
        CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
        CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
        CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));

    if (qhp->attr.state == C4IW_QP_STATE_RTS) {
        attrs.next_state = C4IW_QP_STATE_TERMINATE;
        c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
            &attrs, 0);
    }

    event.event = ib_event;
    event.device = chp->ibcq.device;
    if (ib_event == IB_EVENT_CQ_ERR)
        event.element.cq = &chp->ibcq;
    else
        event.element.qp = &qhp->ibqp;
    if (qhp->ibqp.event_handler)
        (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

    spin_lock_irqsave(&chp->comp_handler_lock, flag);
    (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
    spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
}
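
/*
 * Dispatch an async error CQE from the hardware: look up the owning QP
 * and its SQ or RQ completion queue under dev->lock, take references so
 * neither can be destroyed mid-dispatch, then map the T4 error status
 * onto the appropriate ib_event_type via post_qp_event().
 */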
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
    struct c4iw_cq *chp;
    struct c4iw_qp *qhp;
    u32 cqid;

    spin_lock_irq(&dev->lock);
    qhp = get_qhp(dev, CQE_QPID(err_cqe));
    if (!qhp) {
        printf("BAD AE qpid 0x%x opcode %d "
            "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
            CQE_QPID(err_cqe),
            CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
            CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
            CQE_WRID_LOW(err_cqe));
        spin_unlock_irq(&dev->lock);
        goto out;
    }

    if (SQ_TYPE(err_cqe))
        cqid = qhp->attr.scq;
    else
        cqid = qhp->attr.rcq;
    chp = get_chp(dev, cqid);
    if (!chp) {
        printf("BAD AE cqid 0x%x qpid 0x%x opcode %d "
            "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
            cqid, CQE_QPID(err_cqe),
            CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
            CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
            CQE_WRID_LOW(err_cqe));
        spin_unlock_irq(&dev->lock);
        goto out;
    }

    c4iw_qp_add_ref(&qhp->ibqp);
    atomic_inc(&chp->refcnt);
    spin_unlock_irq(&dev->lock);

    /* Bad incoming write */
    if (RQ_TYPE(err_cqe) &&
        (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
        post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
        goto done;
    }

    switch (CQE_STATUS(err_cqe)) {

    /* Completion Events */
    case T4_ERR_SUCCESS:
        printf(KERN_ERR MOD "AE with status 0!\n");
        break;

    case T4_ERR_STAG:
    case T4_ERR_PDID:
    case T4_ERR_QPID:
    case T4_ERR_ACCESS:
    case T4_ERR_WRAP:
    case T4_ERR_BOUND:
    case T4_ERR_INVALIDATE_SHARED_MR:
    case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
        post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
        break;

    /* Device Fatal Errors */
    case T4_ERR_ECC:
    case T4_ERR_ECC_PSTAG:
    case T4_ERR_INTERNAL_ERR:
        post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
        break;

    /* QP Fatal Errors */
    case T4_ERR_OUT_OF_RQE:
    case T4_ERR_PBL_ADDR_BOUND:
    case T4_ERR_CRC:
    case T4_ERR_MARKER:
    case T4_ERR_PDU_LEN_ERR:
    case T4_ERR_DDP_VERSION:
    case T4_ERR_RDMA_VERSION:
    case T4_ERR_OPCODE:
    case T4_ERR_DDP_QUEUE_NUM:
    case T4_ERR_MSN:
    case T4_ERR_TBIT:
    case T4_ERR_MO:
    case T4_ERR_MSN_GAP:
    case T4_ERR_MSN_RANGE:
    case T4_ERR_RQE_ADDR_BOUND:
    case T4_ERR_IRD_OVERFLOW:
        post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
        break;

    default:
        printf("Unknown T4 status 0x%x QPID 0x%x\n",
            CQE_STATUS(err_cqe), qhp->wq.sq.qid);
        post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
        break;
    }
done:
    if (atomic_dec_and_test(&chp->refcnt))
        wake_up(&chp->wait);
    c4iw_qp_rem_ref(&qhp->ibqp);
out:
    return;
}

int c4iw_ev_handler(struct sge_iq *iq, const struct rsp_ctrl *rc)
{
    struct c4iw_dev *dev = iq->adapter->iwarp_softc;
    u32 qid = be32_to_cpu(rc->pldbuflen_qid);
    struct c4iw_cq *chp;
    unsigned long flag;

    chp = get_chp(dev, qid);
    if (chp) {
        spin_lock_irqsave(&chp->comp_handler_lock, flag);
        (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
        spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
    } else
        CTR2(KTR_IW_CXGBE, "%s unknown cqid 0x%x", __func__, qid);
    return 0;
}
#endif

sys/dev/cxgbe/iw_cxgbe/id_table.c (new file, 118 lines)
@@ -0,0 +1,118 @@
/*
 * Copyright (c) 2011-2013 Chelsio Communications. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/libkern.h>
#include "iw_cxgbe.h"

#define RANDOM_SKIP 16

/*
 * Trivial bitmap-based allocator. If the random flag is set, the
 * allocator is designed to:
 * - pseudo-randomize the id returned such that it is not trivially predictable.
 * - avoid reuse of recently used id (at the expense of predictability)
 */
u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
{
    unsigned long flags;
    u32 obj;

    spin_lock_irqsave(&alloc->lock, flags);

    obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
    if (obj >= alloc->max)
        obj = find_first_zero_bit(alloc->table, alloc->max);

    if (obj < alloc->max) {
        if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
            alloc->last += arc4random() % RANDOM_SKIP;
        else
            alloc->last = obj + 1;
        if (alloc->last >= alloc->max)
            alloc->last = 0;
        set_bit(obj, alloc->table);
        obj += alloc->start;
    } else
        obj = -1;

    spin_unlock_irqrestore(&alloc->lock, flags);
    return obj;
}
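
/*
 * Example use (hypothetical sizes): a table of 1024 ids with the first
 * 16 pre-marked in-use and randomized allocation:
 *
 *     struct c4iw_id_table t;
 *     u32 id;
 *
 *     if (c4iw_id_table_alloc(&t, 0, 1024, 16, C4IW_ID_TABLE_F_RANDOM))
 *         return (ENOMEM);
 *     id = c4iw_id_alloc(&t);    (returns (u32)-1 when the table is full)
 *     ...
 *     c4iw_id_free(&t, id);
 *     c4iw_id_table_free(&t);
 */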
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
{
    unsigned long flags;

    obj -= alloc->start;
    BUG_ON((int)obj < 0);

    spin_lock_irqsave(&alloc->lock, flags);
    clear_bit(obj, alloc->table);
    spin_unlock_irqrestore(&alloc->lock, flags);
}

int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
    u32 reserved, u32 flags)
{
    int i;

    alloc->start = start;
    alloc->flags = flags;
    if (flags & C4IW_ID_TABLE_F_RANDOM)
        alloc->last = arc4random() % RANDOM_SKIP;
    else
        alloc->last = 0;
    alloc->max = num;
    spin_lock_init(&alloc->lock);
    alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof(long),
        GFP_KERNEL);
    if (!alloc->table)
        return -ENOMEM;

    bitmap_zero(alloc->table, num);
    if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
        for (i = 0; i < reserved; ++i)
            set_bit(i, alloc->table);

    return 0;
}

void c4iw_id_table_free(struct c4iw_id_table *alloc)
{
    kfree(alloc->table);
}
#endif

sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h (new file, 1046 lines; diff too large to display)

sys/dev/cxgbe/iw_cxgbe/mem.c (new file, 828 lines)
@@ -0,0 +1,828 @@
/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/types.h>
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <asm/atomic.h>

#include <common/t4_msg.h>
#include "iw_cxgbe.h"

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96

static int
write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
    struct adapter *sc = rdev->adap;
    struct ulp_mem_io *ulpmc;
    struct ulptx_idata *ulpsc;
    u8 wr_len, *to_dp, *from_dp;
    int copy_len, num_wqe, i, ret = 0;
    struct c4iw_wr_wait wr_wait;
    struct wrqe *wr;
    u32 cmd;

    cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
    if (is_t4(sc))
        cmd |= cpu_to_be32(F_ULP_MEMIO_ORDER);
    else
        cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);

    addr &= 0x7FFFFFF;
    CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
    num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
    c4iw_init_wr_wait(&wr_wait);
    for (i = 0; i < num_wqe; i++) {

        copy_len = min(len, C4IW_MAX_INLINE_SIZE);
        wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
            roundup(copy_len, T4_ULPTX_MIN_IO), 16);

        wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
        if (wr == NULL)
            return (0);
        ulpmc = wrtod(wr);

        memset(ulpmc, 0, wr_len);
        INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);

        if (i == (num_wqe - 1)) {
            ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
                F_FW_WR_COMPL);
            ulpmc->wr.wr_lo = (__force __be64)(unsigned long)&wr_wait;
        } else
            ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
        ulpmc->wr.wr_mid = cpu_to_be32(
            V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

        ulpmc->cmd = cmd;
        ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
            DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
        ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr),
            16));
        ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));

        ulpsc = (struct ulptx_idata *)(ulpmc + 1);
        ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

        to_dp = (u8 *)(ulpsc + 1);
        from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
        if (data)
            memcpy(to_dp, from_dp, copy_len);
        else
            memset(to_dp, 0, copy_len);
        if (copy_len % T4_ULPTX_MIN_IO)
            memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
                (copy_len % T4_ULPTX_MIN_IO));
        t4_wrq_tx(sc, wr);
        len -= C4IW_MAX_INLINE_SIZE;
    }

    ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
    return ret;
}
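
/*
 * write_adapter_mem() pushes adapter memory updates through ULP_TX
 * immediate-data work requests, C4IW_MAX_INLINE_SIZE (96) bytes per WR
 * with each chunk padded to T4_ULPTX_MIN_IO (32) bytes; only the final
 * WR requests a completion, which the function then waits on.
 */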
||||
|
||||
/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 * pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
    u32 *stag, u8 stag_state, u32 pdid,
    enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
    int bind_enabled, u32 zbva, u64 to,
    u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx)
			return -ENOMEM;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	CTR5(KTR_IW_CXGBE,
	    "%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x",
	    __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
		    V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
		    V_FW_RI_TPTE_STAGSTATE(stag_state) |
		    V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
		    (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
		    V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
		    FW_RI_VA_BASED_TO))|
		    V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
		    V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
	    (rdev->adap->vres.stag.start >> 5),
	    sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}
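
A note on the stag layout produced above: the low byte is a rolling key taken from the atomic counter and the upper 24 bits are the TPT table index, which is why callers recover the index with stag >> 8. A standalone illustration (the values are made up):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t stag_idx = 0x1234;	/* example index from the TPT table */
	uint32_t key = 0x5a;		/* low 8 bits of the rolling counter */
	uint32_t stag = (stag_idx << 8) | (key & 0xff);

	/* the driver recovers the pieces the same way */
	printf("stag 0x%x -> idx 0x%x key 0x%x\n",
	    stag, stag >> 8, stag & 0xff);
	return (0);
}
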
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
    u32 pbl_addr, u32 pbl_size)
{
	int err;

	CTR4(KTR_IW_CXGBE, "%s pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d",
	    __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}

static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
    u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
	    pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
	    0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
	    0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
    u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
	    0UL, 0, 0, pbl_size, pbl_addr);
}

static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
    struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
	    FW_RI_STAG_NSMR, mhp->attr.perms,
	    mhp->attr.mw_bind_enable, mhp->attr.zbva,
	    mhp->attr.va_fbo, mhp->attr.len, shift - 12,
	    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		    mhp->attr.pbl_addr);
	return ret;
}

static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
    struct c4iw_mr *mhp, int shift, int npages)
{
	u32 stag;
	int ret;

	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
	    FW_RI_STAG_NSMR, mhp->attr.perms,
	    mhp->attr.mw_bind_enable, mhp->attr.zbva,
	    mhp->attr.va_fbo, mhp->attr.len, shift - 12,
	    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		    mhp->attr.pbl_addr);

	return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
	    npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}
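
alloc_pbl() sizes its pool request as npages << 3 because the page-backed list holds one 8-byte __be64 physical address per page. A trivial standalone check of that arithmetic (the page count is made up):

#include <stdio.h>

int
main(void)
{
	int npages = 1000;

	/* one 8-byte __be64 slot per page address */
	printf("PBL bytes for %d pages: %d\n", npages, npages << 3);
	return (0);
}
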
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
    int num_phys_buf, u64 *iova_start,
    u64 *total_size, int *npages,
    int *shift, __be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
			    PAGE_SIZE - 1) & PAGE_MASK;
	}

	if (*total_size > 0xFFFFFFFFULL)
		return -ENOMEM;

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
		    (1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		    j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		    ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
			    ((u64) j << *shift));

	CTR6(KTR_IW_CXGBE,
	    "%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d", __func__,
	    (unsigned long long)*iova_start, (unsigned long long)mask, *shift,
	    (unsigned long long)*total_size, *npages);

	return 0;

}
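
The shift selection above ORs every buffer boundary into mask and then takes the lowest set bit at or above PAGE_SHIFT, i.e. the largest page size that still aligns with all boundaries. A standalone sketch with made-up buffer addresses:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int
main(void)
{
	/* boundary addresses of example buffers, ORed as in the driver */
	uint64_t mask = 0x200000 | 0x240000 | 0x280000;
	int shift;

	/* lowest set bit at or above PAGE_SHIFT bounds the page size */
	for (shift = PAGE_SHIFT; shift < 27; ++shift)
		if ((1ULL << shift) & mask)
			break;
	printf("largest usable page shift: %d (%llu KB pages)\n",
	    shift, (unsigned long long)(1ULL << shift) / 1024);
	return (0);
}
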
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
    struct ib_pd *pd, struct ib_phys_buf *buffer_list,
    int num_phys_buf, int acc, u64 *iova_start)
{

	struct c4iw_mr mh, *mhp;
	struct c4iw_pd *php;
	struct c4iw_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s ib_mr %p ib_pd %p", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(mr);
	rhp = mhp->rhp;
	php = to_c4iw_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_c4iw_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
		    IB_ACCESS_MW_BIND;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
		    iova_start,
		    &total_size, &npages,
		    &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
    struct ib_phys_buf *buffer_list,
    int num_phys_buf, int acc, u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
	    &total_size, &npages, &shift,
	    &page_list);
	if (ret)
		goto err;

	ret = alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err_pbl;
	}

	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
	    npages);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
	    mhp->attr.pbl_size << 3);

err:
	kfree(mhp);
	return ERR_PTR(ret);

}

struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0UL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
	    FW_RI_STAG_NSMR, mhp->attr.perms,
	    mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
	    mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}

struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    u64 virt, int acc, struct ib_udata *udata, int mr_id)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
				    &chunk->page_list[j]) +
				    mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = write_pbl(&mhp->rhp->rdev,
					    pages,
					    mhp->attr.pbl_addr + (n << 3), i);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
		    mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
	    mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}
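
The registration path above batches PBL writes through a single scratch page: it fills PAGE_SIZE / sizeof(__be64) entries, flushes, and writes any partial tail afterwards. A standalone sketch of that flush pattern, where flush() is a hypothetical stand-in for write_pbl():

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096

/* stand-in for write_pbl(); just reports each flush */
static void
flush(int byte_off, int nentries)
{
	printf("write %d entries at byte offset %d\n", nentries, byte_off);
}

int
main(void)
{
	int total = 1500;	/* page addresses to write (made up) */
	int per_buf = (int)(PAGE_SIZE / sizeof(uint64_t));	/* 512 */
	int i = 0, n = 0, k;

	for (k = 0; k < total; k++) {
		if (++i == per_buf) {	/* scratch page full: flush it */
			flush(n << 3, i);
			n += i;
			i = 0;
		}
	}
	if (i)			/* trailing partial buffer */
		flush(n << 3, i);
	return (0);
}
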
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	CTR4(KTR_IW_CXGBE, "%s ib_mw %p mmid 0x%x ptr %p", __func__, mw, mmid,
	    mhp);
	return 0;
}

struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
	    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
	    mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
	    mhp->attr.pbl_size << 3);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
    int page_list_len)
{
	struct c4iw_fr_page_list *c4pl;
	struct c4iw_dev *dev = to_c4iw_dev(device);
	bus_addr_t dma_addr;
	int size = sizeof *c4pl + page_list_len * sizeof(u64);

	c4pl = contigmalloc(size,
	    M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0);
	if (c4pl)
		dma_addr = vtophys(c4pl);
	else
		return ERR_PTR(-ENOMEM);

	pci_unmap_addr_set(c4pl, mapping, dma_addr);
	c4pl->dma_addr = dma_addr;
	c4pl->dev = dev;
	c4pl->size = size;
	c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
	c4pl->ibpl.max_page_list_len = page_list_len;

	return &c4pl->ibpl;
}
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
	contigfree(c4pl, c4pl->size, M_DEVBUF);
}

int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGBE, "%s ib_mr %p", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
	    mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
		    mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}
#endif
498
sys/dev/cxgbe/iw_cxgbe/provider.c
Normal file
@ -0,0 +1,498 @@
/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <asm/pgtable.h>
#include <linux/page.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgbe.h"
#include "user.h"

static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default = 1)");

static int c4iw_modify_port(struct ib_device *ibdev,
    u8 port, int port_modify_mask,
    struct ib_port_modify *props)
{
	return -ENOSYS;
}

static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int c4iw_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
    u8 port_num, struct ib_wc *in_wc,
    struct ib_grh *in_grh, struct ib_mad *in_mad,
    struct ib_mad *out_mad)
{
	return -ENOSYS;
}

static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct c4iw_dev *rhp = to_c4iw_dev(context->device);
	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
	struct c4iw_mm_entry *mm, *tmp;

	CTR2(KTR_IW_CXGBE, "%s context %p", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
    struct ib_udata *udata)
{
	struct c4iw_ucontext *context;
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);

	CTR2(KTR_IW_CXGBE, "%s ibdev %p", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
{
	return pgprot_writecombine(prot);
}

static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct c4iw_rdev *rdev;
	int ret = 0;
	struct c4iw_mm_entry *mm;
	struct c4iw_ucontext *ucontext;
	u64 addr, paddr;

	u64 va_regs_res = 0, va_udbs_res = 0;
	u64 len_regs_res = 0, len_udbs_res = 0;

	CTR3(KTR_IW_CXGBE, "%s:1 ctx %p vma %p", __func__, context, vma);

	CTR4(KTR_IW_CXGBE, "%s:1a pgoff 0x%lx key 0x%x len %d", __func__,
	    vma->vm_pgoff, key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		CTR3(KTR_IW_CXGBE, "%s:2 unaligned vm_start %u vma %p",
		    __func__, vma->vm_start, vma);
		return -EINVAL;
	}

	rdev = &(to_c4iw_dev(context->device)->rdev);
	ucontext = to_c4iw_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm) {
		CTR4(KTR_IW_CXGBE, "%s:3 ucontext %p key %u len %u", __func__,
		    ucontext, key, len);
		return -EINVAL;
	}
	addr = mm->addr;
	kfree(mm);

	va_regs_res = (u64)rman_get_virtual(rdev->adap->regs_res);
	len_regs_res = (u64)rman_get_size(rdev->adap->regs_res);
	va_udbs_res = (u64)rman_get_virtual(rdev->adap->udbs_res);
	len_udbs_res = (u64)rman_get_size(rdev->adap->udbs_res);

	CTR6(KTR_IW_CXGBE,
	    "%s:4 addr %p, masync region %p:%p, udb region %p:%p", __func__,
	    addr, va_regs_res, va_regs_res+len_regs_res, va_udbs_res,
	    va_udbs_res+len_udbs_res);

	if (addr >= va_regs_res && addr < va_regs_res + len_regs_res) {
		CTR4(KTR_IW_CXGBE, "%s:5 MA_SYNC addr %p region %p, reglen %u",
		    __func__, addr, va_regs_res, len_regs_res);
		/*
		 * MA_SYNC register...
		 */
		paddr = vtophys(addr);
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = io_remap_pfn_range(vma, vma->vm_start,
		    paddr >> PAGE_SHIFT,
		    len, vma->vm_page_prot);
	} else {

		if (addr >= va_udbs_res && addr < va_udbs_res + len_udbs_res) {
			/*
			 * Map user DB or OCQP memory...
			 */
			paddr = vtophys(addr);
			CTR4(KTR_IW_CXGBE,
			    "%s:6 USER DB-GTS addr %p region %p, reglen %u",
			    __func__, addr, va_udbs_res, len_udbs_res);
#ifdef DOT5
			if (is_t5(rdev->lldi.adapter_type) && map_udb_as_wc)
				vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
			else
#endif
				vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
			ret = io_remap_pfn_range(vma, vma->vm_start,
			    paddr >> PAGE_SHIFT,
			    len, vma->vm_page_prot);
		} else {
			/*
			 * Map WQ or CQ contig dma memory...
			 */
			CTR4(KTR_IW_CXGBE,
			    "%s:7 WQ/CQ addr %p vm_start %u vma %p", __func__,
			    addr, vma->vm_start, vma);
			ret = io_remap_pfn_range(vma, vma->vm_start,
			    addr >> PAGE_SHIFT,
			    len, vma->vm_page_prot);
		}
	}
	CTR4(KTR_IW_CXGBE, "%s:8 ctx %p vma %p ret %u", __func__, context, vma,
	    ret);
	return ret;
}

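The dispatch in c4iw_mmap() classifies the looked-up address into one of three regions: the MA_SYNC register BAR, the user doorbell/GTS window, or ordinary contiguous WQ/CQ DMA memory. A standalone sketch of that classification; the region bounds here are invented, while the driver obtains them from the adapter's rman resources:

#include <stdio.h>
#include <stdint.h>

/* illustrative region bounds, not real adapter addresses */
static const uint64_t regs_va = 0x10000000, regs_len = 0x100000;
static const uint64_t udbs_va = 0x20000000, udbs_len = 0x100000;

static const char *
classify(uint64_t addr)
{
	if (addr >= regs_va && addr < regs_va + regs_len)
		return ("MA_SYNC register (mapped uncached)");
	if (addr >= udbs_va && addr < udbs_va + udbs_len)
		return ("user doorbell/GTS (uncached, or WC on T5)");
	return ("WQ/CQ contiguous DMA memory");
}

int
main(void)
{
	printf("%s\n", classify(0x10000040));
	printf("%s\n", classify(0x20000080));
	printf("%s\n", classify(0x30000000));
	return (0);
}
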
static int
c4iw_deallocate_pd(struct ib_pd *pd)
{
	struct c4iw_pd *php = to_c4iw_pd(pd);
	struct c4iw_dev *rhp = php->rhp;

	CTR3(KTR_IW_CXGBE, "%s: pd %p, pdid 0x%x", __func__, pd, php->pdid);

	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur--;
	mutex_unlock(&rhp->rdev.stats.lock);
	kfree(php);

	return (0);
}

static struct ib_pd *
c4iw_allocate_pd(struct ib_device *ibdev, struct ib_ucontext *context,
    struct ib_udata *udata)
{
	struct c4iw_pd *php;
	u32 pdid;
	struct c4iw_dev *rhp;

	CTR4(KTR_IW_CXGBE, "%s: ibdev %p, context %p, data %p", __func__, ibdev,
	    context, udata);
	rhp = (struct c4iw_dev *) ibdev;
	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
			c4iw_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur++;
	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
	mutex_unlock(&rhp->rdev.stats.lock);

	CTR6(KTR_IW_CXGBE,
	    "%s: ibdev %p, context %p, data %p, pdid 0x%x, pd %p", __func__,
	    ibdev, context, udata, pdid, php);
	return (&php->ibpd);
}

static int
c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{

	CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, pkey %p", __func__,
	    ibdev, port, index, pkey);

	*pkey = 0;
	return (0);
}

static int
c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid)
{
	struct c4iw_dev *dev;
	struct port_info *pi;
	struct adapter *sc;

	CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, gid %p", __func__,
	    ibdev, port, index, gid);

	memset(&gid->raw[0], 0, sizeof(gid->raw));
	dev = to_c4iw_dev(ibdev);
	sc = dev->rdev.adap;
	if (port == 0 || port > sc->params.nports)
		return (-EINVAL);
	pi = sc->port[port - 1];
	memcpy(&gid->raw[0], pi->hw_addr, sizeof(pi->hw_addr));
	return (0);
}

static int
c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
{
	struct c4iw_dev *dev = to_c4iw_dev(ibdev);
	struct adapter *sc = dev->rdev.adap;

	CTR3(KTR_IW_CXGBE, "%s ibdev %p, props %p", __func__, ibdev, props);

	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, sc->port[0]->hw_addr, 6);
	props->hw_ver = sc->params.chipid;
	props->fw_ver = sc->params.fw_vers;
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = T4_PAGESIZE_MASK;
	props->vendor_id = pci_get_vendor(sc->dev);
	props->vendor_part_id = pci_get_device(sc->dev);
	props->max_mr_size = T4_MAX_MR_SIZE;
	props->max_qp = T4_MAX_NUM_QP;
	props->max_qp_wr = T4_MAX_QP_DEPTH;
	props->max_sge = T4_MAX_RECV_SGE;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = c4iw_max_read_depth;
	props->max_qp_init_rd_atom = c4iw_max_read_depth;
	props->max_cq = T4_MAX_NUM_CQ;
	props->max_cqe = T4_MAX_CQ_DEPTH;
	props->max_mr = c4iw_num_stags(&dev->rdev);
	props->max_pd = T4_MAX_NUM_PD;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;

	return (0);
}

/*
 * Returns -errno on failure.
 */
static int
c4iw_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
{
	struct c4iw_dev *dev;
	struct adapter *sc;
	struct port_info *pi;
	struct ifnet *ifp;

	CTR4(KTR_IW_CXGBE, "%s ibdev %p, port %d, props %p", __func__, ibdev,
	    port, props);

	dev = to_c4iw_dev(ibdev);
	sc = dev->rdev.adap;
	if (port > sc->params.nports)
		return (-EINVAL);
	pi = sc->port[port - 1];
	ifp = pi->ifp;

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (ifp->if_mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (ifp->if_mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (ifp->if_mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (ifp->if_mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;
	props->state = pi->link_cfg.link_ok ? IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

/*
 * Returns -errno on error.
 */
int
c4iw_register_device(struct c4iw_dev *dev)
{
	struct adapter *sc = dev->rdev.adap;
	struct ib_device *ibdev = &dev->ibdev;
	struct iw_cm_verbs *iwcm;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev, sc);
	BUG_ON(!sc->port[0]);
	strlcpy(ibdev->name, device_get_nameunit(sc->dev), sizeof(ibdev->name));
	memset(&ibdev->node_guid, 0, sizeof(ibdev->node_guid));
	memcpy(&ibdev->node_guid, sc->port[0]->hw_addr, 6);
	ibdev->owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
	if (fastreg_support)
		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	ibdev->local_dma_lkey = 0;
	ibdev->uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	ibdev->node_type = RDMA_NODE_RNIC;
	strlcpy(ibdev->node_desc, C4IW_NODE_DESC, sizeof(ibdev->node_desc));
	ibdev->phys_port_cnt = sc->params.nports;
	ibdev->num_comp_vectors = 1;
	ibdev->dma_device = sc->dev;
	ibdev->query_device = c4iw_query_device;
	ibdev->query_port = c4iw_query_port;
	ibdev->modify_port = c4iw_modify_port;
	ibdev->query_pkey = c4iw_query_pkey;
	ibdev->query_gid = c4iw_query_gid;
	ibdev->alloc_ucontext = c4iw_alloc_ucontext;
	ibdev->dealloc_ucontext = c4iw_dealloc_ucontext;
	ibdev->mmap = c4iw_mmap;
	ibdev->alloc_pd = c4iw_allocate_pd;
	ibdev->dealloc_pd = c4iw_deallocate_pd;
	ibdev->create_ah = c4iw_ah_create;
	ibdev->destroy_ah = c4iw_ah_destroy;
	ibdev->create_qp = c4iw_create_qp;
	ibdev->modify_qp = c4iw_ib_modify_qp;
	ibdev->query_qp = c4iw_ib_query_qp;
	ibdev->destroy_qp = c4iw_destroy_qp;
	ibdev->create_cq = c4iw_create_cq;
	ibdev->destroy_cq = c4iw_destroy_cq;
	ibdev->resize_cq = c4iw_resize_cq;
	ibdev->poll_cq = c4iw_poll_cq;
	ibdev->get_dma_mr = c4iw_get_dma_mr;
	ibdev->reg_phys_mr = c4iw_register_phys_mem;
	ibdev->rereg_phys_mr = c4iw_reregister_phys_mem;
	ibdev->reg_user_mr = c4iw_reg_user_mr;
	ibdev->dereg_mr = c4iw_dereg_mr;
	ibdev->alloc_mw = c4iw_alloc_mw;
	ibdev->bind_mw = c4iw_bind_mw;
	ibdev->dealloc_mw = c4iw_dealloc_mw;
	ibdev->alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
	ibdev->alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
	ibdev->free_fast_reg_page_list = c4iw_free_fastreg_pbl;
	ibdev->attach_mcast = c4iw_multicast_attach;
	ibdev->detach_mcast = c4iw_multicast_detach;
	ibdev->process_mad = c4iw_process_mad;
	ibdev->req_notify_cq = c4iw_arm_cq;
	ibdev->post_send = c4iw_post_send;
	ibdev->post_recv = c4iw_post_receive;
	ibdev->uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;

	iwcm = kmalloc(sizeof(*iwcm), GFP_KERNEL);
	if (iwcm == NULL)
		return (-ENOMEM);

	iwcm->connect = c4iw_connect;
	iwcm->accept = c4iw_accept_cr;
	iwcm->reject = c4iw_reject_cr;
	iwcm->create_listen = c4iw_create_listen;
	iwcm->destroy_listen = c4iw_destroy_listen;
	iwcm->add_ref = c4iw_qp_add_ref;
	iwcm->rem_ref = c4iw_qp_rem_ref;
	iwcm->get_qp = c4iw_get_qp;
	ibdev->iwcm = iwcm;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		kfree(iwcm);

	return (ret);
}

void
c4iw_unregister_device(struct c4iw_dev *dev)
{

	CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev,
	    dev->rdev.adap);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	return;
}
#endif
1707
sys/dev/cxgbe/iw_cxgbe/qp.c
Normal file
File diff suppressed because it is too large
Load Diff
342
sys/dev/cxgbe/iw_cxgbe/resource.c
Normal file
@ -0,0 +1,342 @@
/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/spinlock.h>
#include "iw_cxgbe.h"

static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
	u32 i;

	if (c4iw_id_table_alloc(&rdev->resource.qid_table,
	    rdev->adap->vres.qp.start,
	    rdev->adap->vres.qp.size,
	    rdev->adap->vres.qp.size, 0)) {
		printf("%s: return ENOMEM\n", __func__);
		return -ENOMEM;
	}

	for (i = rdev->adap->vres.qp.start;
	    i < rdev->adap->vres.qp.start + rdev->adap->vres.qp.size; i++)
		if (!(i & rdev->qpmask))
			c4iw_id_free(&rdev->resource.qid_table, i);
	return 0;
}

/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
	int err = 0;
	err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
	    C4IW_ID_TABLE_F_RANDOM);
	if (err)
		goto tpt_err;
	err = c4iw_init_qid_table(rdev);
	if (err)
		goto qid_err;
	err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
	    nr_pdid, 1, 0);
	if (err)
		goto pdid_err;
	return 0;
pdid_err:
	c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
	c4iw_id_table_free(&rdev->resource.tpt_table);
tpt_err:
	return -ENOMEM;
}

/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
	u32 entry;
	entry = c4iw_id_alloc(id_table);
	if (entry == (u32)(-1)) {
		return 0;
	}
	return entry;
}

void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
	CTR2(KTR_IW_CXGBE, "%s entry 0x%x", __func__, entry);
	c4iw_id_free(id_table, entry);
}

u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
		    entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	CTR2(KTR_IW_CXGBE, "%s: qid 0x%x", __func__, qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

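When the per-context free list is empty, c4iw_get_cqid() pulls one aligned qid from the table and caches qid+1..qid+qpmask for later reuse, since those ids share the same doorbell/GTS page. A standalone sketch of that grouping; the qpmask and qid values are illustrative:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t qpmask = 3;	/* 4 qids per doorbell/GTS page (example) */
	uint32_t qid = 16;	/* freshly allocated, always aligned */
	uint32_t i;

	/* the driver keeps qid itself and queues the neighbors for reuse */
	for (i = qid + 1; i & qpmask; i++)
		printf("cached qid %u (same db/gts page as %u)\n", i, qid);
	return (0);
}
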
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
    struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}

u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
		    entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}

		/*
		 * now put the same ids on the cq list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->cqids);
		for (i = qid; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
    struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
	c4iw_id_table_free(&rscp->tpt_table);
	c4iw_id_table_free(&rscp->qid_table);
	c4iw_id_table_free(&rscp->pdid_table);
}

/* PBL Memory Manager. Uses Linux generic allocator. */

#define MIN_PBL_SHIFT 8			/* 256B == min PBL size (32 entries) */

u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr, size);
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
			rdev->stats.pbl.max = rdev->stats.pbl.cur;
	} else
		rdev->stats.pbl.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
}

int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
	rdev->pbl_pool = gen_pool_create(rdev->adap->vres.pbl.start,
	    MIN_PBL_SHIFT,
	    rdev->adap->vres.pbl.size);
	if (!rdev->pbl_pool)
		return -ENOMEM;

	return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->pbl_pool);
}

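Both pool managers charge their stats in MIN_*_SHIFT granules via roundup(). A standalone sketch of the PBL accounting; the request size is made up:

#include <stdio.h>

#define MIN_PBL_SHIFT 8		/* 256B == min PBL size (32 entries) */

int
main(void)
{
	int size = 72;		/* requested bytes (9 PBL entries) */
	int gran = 1 << MIN_PBL_SHIFT;
	/* round the request up to the pool granule, as roundup() does */
	int charged = (size + gran - 1) / gran * gran;

	printf("request %d -> accounted %d bytes\n", size, charged);
	return (0);
}
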
/* RQT Memory Manager. Uses Linux generic allocator. */

#define MIN_RQT_SHIFT 10	/* 1KB == min RQT size (16 entries) */

u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr,
	    size << 6);
	if (!addr)
		printf("%s: Out of RQT memory\n",
		    device_get_nameunit(rdev->adap->dev));
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
			rdev->stats.rqt.max = rdev->stats.rqt.cur;
	} else
		rdev->stats.rqt.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size << 6);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
}

int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	rdev->rqt_pool = gen_pool_create(rdev->adap->vres.rq.start,
	    MIN_RQT_SHIFT,
	    rdev->adap->vres.rq.size);
	if (!rdev->rqt_pool)
		return -ENOMEM;

	return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->rqt_pool);
}
#endif
597
sys/dev/cxgbe/iw_cxgbe/t4.h
Normal file
@ -0,0 +1,597 @@
/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD$
 */
#ifndef __T4_H__
#define __T4_H__

/*
 * Fixme: Adding missing defines
 */
#define SGE_PF_KDOORBELL	0x0
#define QID_MASK		0xffff8000U
#define QID_SHIFT		15
#define QID(x)			((x) << QID_SHIFT)
#define DBPRIO			0x00004000U
#define PIDX_MASK		0x00003fffU
#define PIDX_SHIFT		0
#define PIDX(x)			((x) << PIDX_SHIFT)

#define SGE_PF_GTS		0x4
#define INGRESSQID_MASK		0xffff0000U
#define INGRESSQID_SHIFT	16
#define INGRESSQID(x)		((x) << INGRESSQID_SHIFT)
#define TIMERREG_MASK		0x0000e000U
#define TIMERREG_SHIFT		13
#define TIMERREG(x)		((x) << TIMERREG_SHIFT)
#define SEINTARM_MASK		0x00001000U
#define SEINTARM_SHIFT		12
#define SEINTARM(x)		((x) << SEINTARM_SHIFT)
#define CIDXINC_MASK		0x00000fffU
#define CIDXINC_SHIFT		0
#define CIDXINC(x)		((x) << CIDXINC_SHIFT)

#define T4_MAX_NUM_QP		(1<<16)
#define T4_MAX_NUM_CQ		(1<<15)
#define T4_MAX_NUM_PD		(1<<15)
#define T4_EQ_STATUS_ENTRIES	(L1_CACHE_BYTES > 64 ? 2 : 1)
#define T4_MAX_EQ_SIZE		(65520 - T4_EQ_STATUS_ENTRIES)
#define T4_MAX_IQ_SIZE		(65520 - 1)
#define T4_MAX_RQ_SIZE		(8192 - T4_EQ_STATUS_ENTRIES)
#define T4_MAX_SQ_SIZE		(T4_MAX_EQ_SIZE - 1)
#define T4_MAX_QP_DEPTH		(T4_MAX_RQ_SIZE - 1)
#define T4_MAX_CQ_DEPTH		(T4_MAX_IQ_SIZE - 1)
#define T4_MAX_NUM_STAG		(1<<15)
#define T4_MAX_MR_SIZE		(~0ULL - 1)
#define T4_PAGESIZE_MASK	0xffff000	/* 4KB-128MB */
#define T4_STAG_UNSET		0xffffffff
#define T4_FW_MAJ		0
#define A_PCIE_MA_SYNC		0x30b4

struct t4_status_page {
	__be32 rsvd1;	/* flit 0 - hw owns */
	__be16 rsvd2;
	__be16 qid;
	__be16 cidx;
	__be16 pidx;
	u8 qp_err;	/* flit 1 - sw owns */
	u8 db_off;
	u8 pad;
	u16 host_wq_pidx;
	u16 host_cidx;
	u16 host_pidx;
};

#define T4_EQ_ENTRY_SIZE 64

#define T4_SQ_NUM_SLOTS 5
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
	sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
	sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
	sizeof(struct fw_ri_rdma_write_wr) - \
	sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
	sizeof(struct fw_ri_rdma_write_wr) - \
	sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
	sizeof(struct fw_ri_immd)) & ~31UL)
#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))

#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4

union t4_wr {
	struct fw_ri_res_wr res;
	struct fw_ri_wr ri;
	struct fw_ri_rdma_write_wr write;
	struct fw_ri_send_wr send;
	struct fw_ri_rdma_read_wr read;
	struct fw_ri_bind_mw_wr bind;
	struct fw_ri_fr_nsmr_wr fr;
	struct fw_ri_inv_lstag_wr inv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};

union t4_recv_wr {
	struct fw_ri_recv_wr recv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};

static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
    enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
	wqe->send.opcode = (u8)opcode;
	wqe->send.flags = flags;
	wqe->send.wrid = wrid;
	wqe->send.r1[0] = 0;
	wqe->send.r1[1] = 0;
	wqe->send.r1[2] = 0;
	wqe->send.len16 = len16;
}

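init_wr_hdr() above just stamps the common send-WR header fields. A standalone sketch of the same stamping with local stand-in types; the struct layout and values here are illustrative, not the firmware's wire format:

#include <stdio.h>
#include <stdint.h>

/* minimal stand-ins so the header stamping can run standalone */
typedef uint8_t u8;
typedef uint16_t u16;

struct send_hdr {
	u8 opcode, flags;
	u16 wrid;
	u8 r1[3];
	u8 len16;
};

static void
stamp(struct send_hdr *h, u16 wrid, u8 opcode, u8 flags, u8 len16)
{
	/* mirrors the field writes in init_wr_hdr() */
	h->opcode = opcode;
	h->flags = flags;
	h->wrid = wrid;
	h->r1[0] = h->r1[1] = h->r1[2] = 0;
	h->len16 = len16;
}

int
main(void)
{
	struct send_hdr h;

	stamp(&h, 7, 0x2a, 0, 4);	/* opcode/flags values made up */
	printf("wrid %u len16 %u\n", h.wrid, h.len16);
	return (0);
}
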
/* CQE/AE status codes */
|
||||
#define T4_ERR_SUCCESS 0x0
|
||||
#define T4_ERR_STAG 0x1 /* STAG invalid: either the */
|
||||
/* STAG is offlimt, being 0, */
|
||||
/* or STAG_key mismatch */
|
||||
#define T4_ERR_PDID 0x2 /* PDID mismatch */
|
||||
#define T4_ERR_QPID 0x3 /* QPID mismatch */
|
||||
#define T4_ERR_ACCESS 0x4 /* Invalid access right */
|
||||
#define T4_ERR_WRAP 0x5 /* Wrap error */
|
||||
#define T4_ERR_BOUND 0x6 /* base and bounds voilation */
|
||||
#define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
|
||||
/* shared memory region */
|
||||
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
|
||||
/* shared memory region */
|
||||
#define T4_ERR_ECC 0x9 /* ECC error detected */
|
||||
#define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */
|
||||
/* reading PSTAG for a MW */
|
||||
/* Invalidate */
|
||||
#define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
|
||||
/* software error */
|
||||
#define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */
|
||||
#define T4_ERR_CRC 0x10 /* CRC error */
|
||||
#define T4_ERR_MARKER 0x11 /* Marker error */
|
||||
#define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
|
||||
#define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */
|
||||
#define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */
|
||||
#define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
|
||||
#define T4_ERR_OPCODE 0x16 /* invalid rdma opcode */
|
||||
#define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
|
||||
#define T4_ERR_MSN 0x18 /* MSN error */
|
||||
#define T4_ERR_TBIT 0x19 /* tag bit not set correctly */
|
||||
#define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */
|
||||
/* or READ_REQ */
|
||||
#define T4_ERR_MSN_GAP 0x1B
|
||||
#define T4_ERR_MSN_RANGE 0x1C
|
||||
#define T4_ERR_IRD_OVERFLOW 0x1D
|
||||
#define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
|
||||
/* software error */
|
||||
#define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
|
||||
/* mismatch) */
|
||||
/*
|
||||
* CQE defs
|
||||
*/
|
||||
struct t4_cqe {
|
||||
__be32 header;
|
||||
__be32 len;
|
||||
union {
|
||||
struct {
|
||||
__be32 stag;
|
||||
__be32 msn;
|
||||
} rcqe;
|
||||
struct {
|
||||
u32 nada1;
|
||||
u16 nada2;
|
||||
u16 cidx;
|
||||
} scqe;
|
||||
struct {
|
||||
__be32 wrid_hi;
|
||||
__be32 wrid_low;
|
||||
} gen;
|
||||
} u;
|
||||
__be64 reserved;
|
||||
__be64 bits_type_ts;
|
||||
};
|
||||
|
||||
/* macros for flit 0 of the cqe */
|
||||
|
||||
#define S_CQE_QPID 12
|
||||
#define M_CQE_QPID 0xFFFFF
|
||||
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
|
||||
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
|
||||
|
||||
#define S_CQE_SWCQE 11
|
||||
#define M_CQE_SWCQE 0x1
|
||||
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
|
||||
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
|
||||
|
||||
#define S_CQE_STATUS 5
|
||||
#define M_CQE_STATUS 0x1F
|
||||
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
|
||||
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
|
||||
|
||||
#define S_CQE_TYPE 4
|
||||
#define M_CQE_TYPE 0x1
|
||||
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
|
||||
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
|
||||
|
||||
#define S_CQE_OPCODE 0
|
||||
#define M_CQE_OPCODE 0xF
|
||||
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
|
||||
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
|
||||
|
||||
#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
|
||||
#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
|
||||
#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
|
||||
#define SQ_TYPE(x) (CQE_TYPE((x)))
|
||||
#define RQ_TYPE(x) (!CQE_TYPE((x)))
|
||||
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
|
||||
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
|
||||
|
||||
#define CQE_SEND_OPCODE(x)(\
|
||||
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
|
||||
|
||||
#define CQE_LEN(x) (be32_to_cpu((x)->len))
|
||||
|
||||
/* used for RQ completion processing */
|
||||
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
|
||||
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
|
||||
|
||||
/* used for SQ completion processing */
|
||||
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
|
||||
|
||||
/* generic accessor macros */
|
||||
#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
|
||||
#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
|
||||
|
||||
/* macros for flit 3 of the cqe */
|
||||
#define S_CQE_GENBIT 63
|
||||
#define M_CQE_GENBIT 0x1
|
||||
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
|
||||
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
|
||||
|
||||
#define S_CQE_OVFBIT 62
|
||||
#define M_CQE_OVFBIT 0x1
|
||||
#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
|
||||
|
||||
#define S_CQE_IQTYPE 60
|
||||
#define M_CQE_IQTYPE 0x3
|
||||
#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
|
||||
|
||||
#define M_CQE_TS 0x0fffffffffffffffULL
|
||||
#define G_CQE_TS(x) ((x) & M_CQE_TS)
|
||||
|
||||
#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
|
||||
#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
|
||||
#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
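
/*
 * Illustrative sketch (editor's addition, not part of the commit): decoding
 * a completed CQE with the accessors above.  Flit 0 carries the QPID,
 * opcode, status, and SQ/RQ type; flit 3 carries the generation bit.
 */
#if 0	/* example only */
static void example_print_cqe(const struct t4_cqe *cqe)
{
	printk(KERN_DEBUG MOD "qpid 0x%x %s opcode %u status %u len %u gen %u\n",
	    CQE_QPID(cqe), SQ_TYPE(cqe) ? "SQ" : "RQ", CQE_OPCODE(cqe),
	    CQE_STATUS(cqe), CQE_LEN(cqe), CQE_GENBIT(cqe));
}
#endif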

struct t4_swsqe {
	u64 wr_id;
	struct t4_cqe cqe;
	int read_len;
	int opcode;
	int complete;
	int signaled;
	u16 idx;
};

struct t4_sq {
	union t4_wr *queue;
	bus_addr_t dma_addr;
	DECLARE_PCI_UNMAP_ADDR(mapping);
	unsigned long phys_addr;
	struct t4_swsqe *sw_sq;
	struct t4_swsqe *oldest_read;
	u64 udb;
	size_t memsize;
	u32 qid;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
	u16 flags;
};

struct t4_swrqe {
	u64 wr_id;
};

struct t4_rq {
	union t4_recv_wr *queue;
	bus_addr_t dma_addr;
	DECLARE_PCI_UNMAP_ADDR(mapping);
	struct t4_swrqe *sw_rq;
	u64 udb;
	size_t memsize;
	u32 qid;
	u32 msn;
	u32 rqt_hwaddr;
	u16 rqt_size;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
};

struct t4_wq {
	struct t4_sq sq;
	struct t4_rq rq;
	void __iomem *db;
	void __iomem *gts;
	struct c4iw_rdev *rdev;
};

static inline int t4_rqes_posted(struct t4_wq *wq)
{
	return wq->rq.in_use;
}

static inline int t4_rq_empty(struct t4_wq *wq)
{
	return wq->rq.in_use == 0;
}

static inline int t4_rq_full(struct t4_wq *wq)
{
	return wq->rq.in_use == (wq->rq.size - 1);
}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{
	return wq->rq.size - 1 - wq->rq.in_use;
}

static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
	wq->rq.in_use++;
	if (++wq->rq.pidx == wq->rq.size)
		wq->rq.pidx = 0;
	wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
}

static inline void t4_rq_consume(struct t4_wq *wq)
{
	wq->rq.in_use--;
	wq->rq.msn++;
	if (++wq->rq.cidx == wq->rq.size)
		wq->rq.cidx = 0;
}

static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
{
	return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
}

static inline u16 t4_rq_wq_size(struct t4_wq *wq)
{
	return wq->rq.size * T4_RQ_NUM_SLOTS;
}

static inline int t4_sq_empty(struct t4_wq *wq)
{
	return wq->sq.in_use == 0;
}

static inline int t4_sq_full(struct t4_wq *wq)
{
	return wq->sq.in_use == (wq->sq.size - 1);
}

static inline u32 t4_sq_avail(struct t4_wq *wq)
{
	return wq->sq.size - 1 - wq->sq.in_use;
}

static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
	wq->sq.in_use++;
	if (++wq->sq.pidx == wq->sq.size)
		wq->sq.pidx = 0;
	wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
		wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
}

static inline void t4_sq_consume(struct t4_wq *wq)
{
	wq->sq.in_use--;
	if (++wq->sq.cidx == wq->sq.size)
		wq->sq.cidx = 0;
}

static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
{
	return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
}

static inline u16 t4_sq_wq_size(struct t4_wq *wq)
{
	return wq->sq.size * T4_SQ_NUM_SLOTS;
}

static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
{
	wmb();
	writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
}

static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
{
	wmb();
	writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
}
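
/*
 * Illustrative sketch (editor's addition, not part of the commit): the shape
 * of a post-receive path built on the helpers above.  pidx counts whole WRs
 * while wq_pidx counts 64-byte hardware EQ entries, so the WR is written at
 * the wq_pidx offset and the doorbell reports EQ entries added.  The
 * fw_ri_recv_wr field fill below follows the upstream iw_cxgb4 post-receive
 * path and is a sketch, not the driver's actual code.
 */
#if 0	/* example only */
static void example_post_recv(struct t4_wq *wq, u8 len16)
{
	union t4_recv_wr *wqe = (union t4_recv_wr *)((u8 *)wq->rq.queue +
	    wq->rq.wq_pidx * T4_EQ_ENTRY_SIZE);

	wqe->recv.opcode = FW_RI_RECV_WR;
	wqe->recv.r1 = 0;
	wqe->recv.wrid = wq->rq.pidx;
	wqe->recv.r2[0] = 0;
	wqe->recv.r2[1] = 0;
	wqe->recv.r2[2] = 0;
	wqe->recv.len16 = len16;

	t4_rq_produce(wq, len16);
	t4_ring_rq_db(wq, DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE));
}
#endif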

static inline int t4_wq_in_error(struct t4_wq *wq)
{
	return wq->rq.queue[wq->rq.size].status.qp_err;
}

static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.qp_err = 1;
}

static inline void t4_disable_wq_db(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.db_off = 1;
}

static inline void t4_enable_wq_db(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.db_off = 0;
}

static inline int t4_wq_db_enabled(struct t4_wq *wq)
{
	return !wq->rq.queue[wq->rq.size].status.db_off;
}

struct t4_cq {
	struct t4_cqe *queue;
	bus_addr_t dma_addr;
	DECLARE_PCI_UNMAP_ADDR(mapping);
	struct t4_cqe *sw_queue;
	void __iomem *gts;
	struct c4iw_rdev *rdev;
	u64 ugts;
	size_t memsize;
	__be64 bits_type_ts;
	u32 cqid;
	u16 size; /* including status page */
	u16 cidx;
	u16 sw_pidx;
	u16 sw_cidx;
	u16 sw_in_use;
	u16 cidx_inc;
	u8 gen;
	u8 error;
};

static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
	u32 val;

	while (cq->cidx_inc > CIDXINC_MASK) {
		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
		    INGRESSQID(cq->cqid);
		writel(val, cq->gts);
		cq->cidx_inc -= CIDXINC_MASK;
	}
	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
	    INGRESSQID(cq->cqid);
	writel(val, cq->gts);
	cq->cidx_inc = 0;
	return 0;
}

static inline void t4_swcq_produce(struct t4_cq *cq)
{
	cq->sw_in_use++;
	if (++cq->sw_pidx == cq->size)
		cq->sw_pidx = 0;
}

static inline void t4_swcq_consume(struct t4_cq *cq)
{
	cq->sw_in_use--;
	if (++cq->sw_cidx == cq->size)
		cq->sw_cidx = 0;
}

static inline void t4_hwcq_consume(struct t4_cq *cq)
{
	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
	if (++cq->cidx_inc == (cq->size >> 4)) {
		u32 val;

		val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
		    INGRESSQID(cq->cqid);
		writel(val, cq->gts);
		cq->cidx_inc = 0;
	}
	if (++cq->cidx == cq->size) {
		cq->cidx = 0;
		cq->gen ^= 1;
	}
}

static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
	return (CQE_GENBIT(cqe) == cq->gen);
}

static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret;
	u16 prev_cidx;

	if (cq->cidx == 0)
		prev_cidx = cq->size - 1;
	else
		prev_cidx = cq->cidx - 1;

	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
		ret = -EOVERFLOW;
		cq->error = 1;
		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
		*cqe = &cq->queue[cq->cidx];
		ret = 0;
	} else
		ret = -ENODATA;
	return ret;
}

static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
	if (cq->sw_in_use)
		return &cq->sw_queue[cq->sw_cidx];
	return NULL;
}

static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret = 0;

	if (cq->error)
		ret = -ENODATA;
	else if (cq->sw_in_use)
		*cqe = &cq->sw_queue[cq->sw_cidx];
	else
		ret = t4_next_hw_cqe(cq, cqe);
	return ret;
}
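
/*
 * Illustrative sketch (editor's addition, not part of the commit): the poll
 * pattern these helpers support.  t4_next_cqe() prefers queued software
 * CQEs; hardware CQEs are validated against the generation bit and then
 * consumed from the matching queue.
 */
#if 0	/* example only */
static int example_poll_one(struct t4_cq *cq)
{
	struct t4_cqe *cqe;
	int ret;

	ret = t4_next_cqe(cq, &cqe);
	if (ret)
		return ret;	/* -ENODATA (empty) or cq->error is set */
	/* ... translate *cqe into a work completion here ... */
	if (SW_CQE(cqe))
		t4_swcq_consume(cq);
	else
		t4_hwcq_consume(cq);
	return 0;
}
#endif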

static inline int t4_cq_in_error(struct t4_cq *cq)
{
	return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
}

static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
	((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}
#endif
70
sys/dev/cxgbe/iw_cxgbe/user.h
Normal file
@@ -0,0 +1,70 @@
/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD$
 */
#ifndef __C4IW_USER_H__
#define __C4IW_USER_H__

#define C4IW_UVERBS_ABI_VERSION	2

/*
 * Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */
struct c4iw_create_cq_resp {
	__u64 key;
	__u64 gts_key;
	__u64 memsize;
	__u32 cqid;
	__u32 size;
	__u32 qid_mask;
};

struct c4iw_create_qp_resp {
	__u64 ma_sync_key;
	__u64 sq_key;
	__u64 rq_key;
	__u64 sq_db_gts_key;
	__u64 rq_db_gts_key;
	__u64 sq_memsize;
	__u64 rq_memsize;
	__u32 sqid;
	__u32 rqid;
	__u32 sq_size;
	__u32 rq_size;
	__u32 qid_mask;
	__u32 flags;
};
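
/*
 * Illustrative sketch (editor's addition, not part of the commit): the
 * layout rule above can be enforced at build time, e.g. with FreeBSD's
 * CTASSERT() and offsetof() in kernel code.  The offsets below assume the
 * field order shown with no implicit padding between the fields.
 */
#if 0	/* example only */
CTASSERT(offsetof(struct c4iw_create_cq_resp, cqid) == 24);
CTASSERT(offsetof(struct c4iw_create_qp_resp, sqid) == 56);
#endif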
#endif
@@ -6,9 +6,16 @@ SUBDIR = if_cxgbe
 SUBDIR+= t4_firmware
 SUBDIR+= t5_firmware
 SUBDIR+= ${_tom}
+SUBDIR+= ${_iw_cxgbe}
 
-.if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386"
+.if ${MACHINE_CPUARCH} == "amd64"
 _tom= tom
+_iw_cxgbe= iw_cxgbe
 .endif
 
+.if ${MACHINE_CPUARCH} == "i386"
+_tom= tom
+.endif
+
+
 .include <bsd.subdir.mk>
27
sys/modules/cxgbe/iw_cxgbe/Makefile
Normal file
@@ -0,0 +1,27 @@
# $FreeBSD$

.include <bsd.own.mk>

CXGBE = ${.CURDIR}/../../../dev/cxgbe
.PATH: ${CXGBE}/iw_cxgbe

KMOD= iw_cxgbe
SRCS= device.c cm.c provider.c mem.c cq.c qp.c resource.c ev.c id_table.c
SRCS+= bus_if.h device_if.h opt_sched.h pci_if.h pcib_if.h opt_ktr.h
SRCS+= opt_inet.h opt_ofed.h vnode_if.h
CFLAGS+= -I${CXGBE} -I${.CURDIR}/../../../ofed/include -DLINUX_TYPES_DEFINED

.if !defined(KERNBUILDDIR)
.if ${MK_INET_SUPPORT} != "no"
opt_inet.h:
	@echo "#define INET 1" > ${.TARGET}
	@echo "#define TCP_OFFLOAD 1" >> ${.TARGET}
.endif

.if ${MK_INET6_SUPPORT} != "no"
opt_inet6.h:
	@echo "#define INET6 1" > ${.TARGET}
.endif
.endif

.include <bsd.kmod.mk>
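
# Illustrative usage note (editor's addition, not part of the commit): once
# built and installed, the module can be loaded with:
#	kldload iw_cxgbe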