Retire the T3 iWARP and TOE drivers. This saves catch-up work when OFED or
other kernel infrastructure changes. Note that this doesn't affect the base cxgb(4) NIC driver for T3 at all. MFC after: No MFC. Sponsored by: Chelsio Communications
This commit is contained in:
parent
e5dc78af11
commit
26cee56642
@ -1,299 +0,0 @@
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
***************************************************************************/
|
||||
#include <sys/cdefs.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#include "opt_inet.h"
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
#include <sys/kernel.h>
|
||||
#include <sys/bus.h>
|
||||
#include <sys/pciio.h>
|
||||
#include <sys/conf.h>
|
||||
#include <machine/bus.h>
|
||||
#include <machine/resource.h>
|
||||
#include <sys/rman.h>
|
||||
#include <sys/ioccom.h>
|
||||
#include <sys/mbuf.h>
|
||||
#include <sys/rwlock.h>
|
||||
#include <sys/linker.h>
|
||||
#include <sys/firmware.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockio.h>
|
||||
#include <sys/smp.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/taskqueue.h>
|
||||
#include <sys/proc.h>
|
||||
#include <sys/eventhandler.h>
|
||||
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/toecore.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <linux/idr.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>
|
||||
|
||||
#ifdef TCP_OFFLOAD
|
||||
#include <cxgb_include.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb.h>
|
||||
|
||||
static int iwch_mod_load(void);
|
||||
static int iwch_mod_unload(void);
|
||||
static int iwch_activate(struct adapter *);
|
||||
static int iwch_deactivate(struct adapter *);
|
||||
|
||||
/*
 * Upper-layer-driver (ULD) registration record: hooks this iWARP driver
 * into the base cxgb(4) driver, which invokes the activate/deactivate
 * callbacks per adapter.
 */
static struct uld_info iwch_uld_info = {
	.uld_id = ULD_IWARP,
	.activate = iwch_activate,
	.deactivate = iwch_deactivate,
};
|
||||
|
||||
/*
 * Initialize the per-device RNIC software state: the CQ/QP/memory-region
 * ID lookup tables, the device lock, and the static T3 capability
 * attributes that are later reported to the RDMA midlayer.
 */
static void
rnic_init(struct iwch_dev *rnicp)
{

	idr_init(&rnicp->cqidr);
	idr_init(&rnicp->qpidr);
	idr_init(&rnicp->mmidr);
	mtx_init(&rnicp->lock, "iwch rnic lock", NULL, MTX_DEF|MTX_DUPOK);

	rnicp->attr.vendor_id = 0x168;
	rnicp->attr.vendor_part_id = 7;
	/* A few QPs/CQs/PDs are held back from the hardware maxima. */
	rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
	rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
	rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
	rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
	rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
	rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH;
	/* Number of STAGs depends on the amount of adapter memory. */
	rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
	rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
	rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
	rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK;
	rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
	rnicp->attr.can_resize_wq = 0;
	rnicp->attr.max_rdma_reads_per_qp = 8;
	rnicp->attr.max_rdma_read_resources =
	    rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps;
	rnicp->attr.max_rdma_read_qp_depth = 8;			/* IRD */
	rnicp->attr.max_rdma_read_depth =
	    rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps;
	rnicp->attr.rq_overflow_handled = 0;
	rnicp->attr.can_modify_ird = 0;
	rnicp->attr.can_modify_ord = 0;
	rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1;
	rnicp->attr.stag0_value = 1;
	rnicp->attr.zbva_support = 1;
	rnicp->attr.local_invalidate_fence = 1;
	rnicp->attr.cq_overflow_detection = 1;

	return;
}
|
||||
|
||||
static void
|
||||
rnic_uninit(struct iwch_dev *rnicp)
|
||||
{
|
||||
idr_destroy(&rnicp->cqidr);
|
||||
idr_destroy(&rnicp->qpidr);
|
||||
idr_destroy(&rnicp->mmidr);
|
||||
mtx_destroy(&rnicp->lock);
|
||||
}
|
||||
|
||||
static int
|
||||
iwch_activate(struct adapter *sc)
|
||||
{
|
||||
struct iwch_dev *rnicp;
|
||||
int rc;
|
||||
|
||||
KASSERT(!isset(&sc->offload_map, MAX_NPORTS),
|
||||
("%s: iWARP already activated on %s", __func__,
|
||||
device_get_nameunit(sc->dev)));
|
||||
|
||||
rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
|
||||
if (rnicp == NULL)
|
||||
return (ENOMEM);
|
||||
|
||||
sc->iwarp_softc = rnicp;
|
||||
rnicp->rdev.adap = sc;
|
||||
|
||||
cxio_hal_init(sc);
|
||||
iwch_cm_init_cpl(sc);
|
||||
|
||||
rc = cxio_rdev_open(&rnicp->rdev);
|
||||
if (rc != 0) {
|
||||
printf("Unable to open CXIO rdev\n");
|
||||
goto err1;
|
||||
}
|
||||
|
||||
rnic_init(rnicp);
|
||||
|
||||
rc = iwch_register_device(rnicp);
|
||||
if (rc != 0) {
|
||||
printf("Unable to register device\n");
|
||||
goto err2;
|
||||
}
|
||||
|
||||
return (0);
|
||||
|
||||
err2:
|
||||
rnic_uninit(rnicp);
|
||||
cxio_rdev_close(&rnicp->rdev);
|
||||
err1:
|
||||
cxio_hal_uninit(sc);
|
||||
iwch_cm_term_cpl(sc);
|
||||
sc->iwarp_softc = NULL;
|
||||
|
||||
return (rc);
|
||||
}
|
||||
|
||||
/*
 * ULD deactivate hook: tear down iWARP on adapter 'sc' in the reverse
 * order of iwch_activate() — unregister from the RDMA midlayer, release
 * RNIC state, close the rdev, shut down the HAL/CPL handlers, then free
 * the ib_device allocation.  Always succeeds.
 */
static int
iwch_deactivate(struct adapter *sc)
{
	struct iwch_dev *rnicp;

	rnicp = sc->iwarp_softc;

	iwch_unregister_device(rnicp);
	rnic_uninit(rnicp);
	cxio_rdev_close(&rnicp->rdev);
	cxio_hal_uninit(sc);
	iwch_cm_term_cpl(sc);
	/* Frees the entire iwch_dev; rnicp is invalid after this call. */
	ib_dealloc_device(&rnicp->ibdev);

	sc->iwarp_softc = NULL;

	return (0);
}
|
||||
|
||||
/*
 * t3_iterate() callback: activate iWARP on one adapter, but only if at
 * least one of its ports is both up and offload-enabled.  On success the
 * MAX_NPORTS bit in offload_map records that iWARP is active.
 */
static void
iwch_activate_all(struct adapter *sc, void *arg __unused)
{
	ADAPTER_LOCK(sc);
	if ((sc->open_device_map & sc->offload_map) != 0 &&
	    t3_activate_uld(sc, ULD_IWARP) == 0)
		setbit(&sc->offload_map, MAX_NPORTS);
	ADAPTER_UNLOCK(sc);
}
|
||||
|
||||
/*
 * t3_iterate() callback: deactivate iWARP on one adapter if it was
 * previously activated (MAX_NPORTS bit set by iwch_activate_all()),
 * clearing the bit on success.
 */
static void
iwch_deactivate_all(struct adapter *sc, void *arg __unused)
{
	ADAPTER_LOCK(sc);
	if (isset(&sc->offload_map, MAX_NPORTS) &&
	    t3_deactivate_uld(sc, ULD_IWARP) == 0)
		clrbit(&sc->offload_map, MAX_NPORTS);
	ADAPTER_UNLOCK(sc);
}
|
||||
|
||||
static int
|
||||
iwch_mod_load(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = iwch_cm_init();
|
||||
if (rc != 0)
|
||||
return (rc);
|
||||
|
||||
rc = t3_register_uld(&iwch_uld_info);
|
||||
if (rc != 0) {
|
||||
iwch_cm_term();
|
||||
return (rc);
|
||||
}
|
||||
|
||||
t3_iterate(iwch_activate_all, NULL);
|
||||
|
||||
return (rc);
|
||||
}
|
||||
|
||||
static int
|
||||
iwch_mod_unload(void)
|
||||
{
|
||||
t3_iterate(iwch_deactivate_all, NULL);
|
||||
|
||||
iwch_cm_term();
|
||||
|
||||
if (t3_unregister_uld(&iwch_uld_info) == EBUSY)
|
||||
return (EBUSY);
|
||||
|
||||
return (0);
|
||||
}
|
||||
#endif /* TCP_OFFLOAD */
|
||||
|
||||
static int
|
||||
iwch_modevent(module_t mod, int cmd, void *arg)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
#ifdef TCP_OFFLOAD
|
||||
switch (cmd) {
|
||||
case MOD_LOAD:
|
||||
rc = iwch_mod_load();
|
||||
if(rc)
|
||||
printf("iw_cxgb: Chelsio T3 RDMA Driver failed to load\n");
|
||||
else
|
||||
printf("iw_cxgb: Chelsio T3 RDMA Driver loaded\n");
|
||||
break;
|
||||
|
||||
case MOD_UNLOAD:
|
||||
rc = iwch_mod_unload();
|
||||
if(rc)
|
||||
printf("iw_cxgb: Chelsio T3 RDMA Driver failed to unload\n");
|
||||
else
|
||||
printf("iw_cxgb: Chelsio T3 RDMA Driver unloaded\n");
|
||||
break;
|
||||
|
||||
default:
|
||||
rc = EINVAL;
|
||||
}
|
||||
#else
|
||||
printf("iw_cxgb: compiled without TCP_OFFLOAD support.\n");
|
||||
rc = EOPNOTSUPP;
|
||||
#endif
|
||||
return (rc);
|
||||
}
|
||||
|
||||
/* Module glue: registers "iw_cxgb" with the kernel module system. */
static moduledata_t iwch_mod_data = {
	"iw_cxgb",
	iwch_modevent,
	0
};

MODULE_VERSION(iw_cxgb, 1);
DECLARE_MODULE(iw_cxgb, iwch_mod_data, SI_SUB_EXEC, SI_ORDER_ANY);
/*
 * NOTE(review): the next line declares a dependency on behalf of the
 * "t3_tom" module, not "iw_cxgb" — this looks like a copy/paste slip;
 * confirm whether it should read MODULE_DEPEND(iw_cxgb, cxgbc, 1, 1, 1).
 */
MODULE_DEPEND(t3_tom, cxgbc, 1, 1, 1);
MODULE_DEPEND(iw_cxgb, toecore, 1, 1, 1);
MODULE_DEPEND(iw_cxgb, t3_tom, 1, 1, 1);
MODULE_DEPEND(iw_cxgb, ibcore, 1, 1, 1);
MODULE_DEPEND(iw_cxgb, linuxkpi, 1, 1, 1);
|
||||
|
@ -1,178 +0,0 @@
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, 2008 Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
$FreeBSD$
|
||||
|
||||
***************************************************************************/
|
||||
|
||||
#ifndef __IWCH_H__
|
||||
#define __IWCH_H__
|
||||
|
||||
struct iwch_pd;
|
||||
struct iwch_cq;
|
||||
struct iwch_qp;
|
||||
struct iwch_mr;
|
||||
|
||||
/* T3 ASIC revisions. */
enum t3ctype {
	T3A = 0,
	T3B,
	T3C
};

/* Mask that rounds an address down to a page boundary. */
#define PAGE_MASK_IWARP	(~(PAGE_SIZE-1))
|
||||
|
||||
/*
 * Static capability attributes of the RNIC, filled in once at activation
 * time (see rnic_init()) and reported to the RDMA midlayer.
 */
struct iwch_rnic_attributes {
	u32 vendor_id;
	u32 vendor_part_id;
	u32 max_qps;
	u32 max_wrs;				/* Max for any SQ/RQ */
	u32 max_sge_per_wr;
	u32 max_sge_per_rdma_write_wr;	/* for RDMA Write WR */
	u32 max_cqs;
	u32 max_cqes_per_cq;
	u32 max_mem_regs;
	u32 max_phys_buf_entries;		/* for phys buf list */
	u32 max_pds;

	/*
	 * The memory page sizes supported by this RNIC.
	 * Bit position i in bitmap indicates page of
	 * size (4k)^i.  Phys block list mode unsupported.
	 */
	u32 mem_pgsizes_bitmask;
	u64 max_mr_size;
	u8 can_resize_wq;

	/*
	 * The maximum number of RDMA Reads that can be outstanding
	 * per QP with this RNIC as the target.
	 */
	u32 max_rdma_reads_per_qp;

	/*
	 * The maximum number of resources used for RDMA Reads
	 * by this RNIC with this RNIC as the target.
	 */
	u32 max_rdma_read_resources;

	/*
	 * The max depth per QP for initiation of RDMA Read
	 * by this RNIC.
	 */
	u32 max_rdma_read_qp_depth;

	/*
	 * The maximum depth for initiation of RDMA Read
	 * operations by this RNIC on all QPs
	 */
	u32 max_rdma_read_depth;
	u8 rq_overflow_handled;
	u32 can_modify_ird;
	u32 can_modify_ord;
	u32 max_mem_windows;
	u32 stag0_value;
	u8 zbva_support;			/* zero-based virtual addressing */
	u8 local_invalidate_fence;
	u32 cq_overflow_detection;
};
|
||||
|
||||
/*
 * Per-adapter iWARP device.  Embeds the RDMA midlayer's ib_device (the
 * midlayer allocates/frees the whole structure via ib_alloc_device /
 * ib_dealloc_device) plus the cxio rdev handle, capability attributes,
 * and the ID tables mapping hardware CQ/QP/memory-region IDs back to
 * their software objects.
 */
struct iwch_dev {
	struct ib_device ibdev;		/* must be first: container_of'd */
	struct cxio_rdev rdev;
	u32 device_cap_flags;
	struct iwch_rnic_attributes attr;
	struct idr cqidr;		/* cqid -> struct iwch_cq */
	struct idr qpidr;		/* qpid -> struct iwch_qp */
	struct idr mmidr;		/* mmid -> struct iwch_mr */
	struct mtx lock;		/* protects the three idr tables */
	TAILQ_ENTRY(iwch_dev) entry;
};
|
||||
|
||||
/*
 * Recover a pointer to the enclosing structure from a pointer to one of
 * its members (fallback definition if not already provided elsewhere).
 */
#ifndef container_of
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
#endif
|
||||
|
||||
static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
|
||||
{
|
||||
return container_of(ibdev, struct iwch_dev, ibdev);
|
||||
}
|
||||
|
||||
/* Stub: this port never reports T3B hardware (always returns 0). */
static inline int t3b_device(const struct iwch_dev *rhp __unused)
{
	return (0);
}

/* Stub: this port never reports T3A hardware (always returns 0). */
static inline int t3a_device(const struct iwch_dev *rhp __unused)
{
	return (0);
}
|
||||
|
||||
/* Look up the CQ for hardware id 'cqid'; NULL if not registered. */
static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
{
	return idr_find(&rhp->cqidr, cqid);
}

/* Look up the QP for hardware id 'qpid'; NULL if not registered. */
static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
{
	return idr_find(&rhp->qpidr, qpid);
}

/* Look up the memory region for id 'mmid'; NULL if not registered. */
static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
{
	return idr_find(&rhp->mmidr, mmid);
}
|
||||
|
||||
/*
 * Insert 'handle' into the given id table at exactly 'id', retrying if
 * the idr needs more preallocated memory.  Returns 0 on success or a
 * negative errno (-ENOMEM if preallocation fails).
 */
static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
				void *handle, u32 id)
{
	int ret;
	u32 newid;

	do {
		/* Preallocate outside the lock; GFP_KERNEL may sleep. */
		if (!idr_pre_get(idr, GFP_KERNEL)) {
			return -ENOMEM;
		}
		mtx_lock(&rhp->lock);
		ret = idr_get_new_above(idr, handle, id, &newid);
		/*
		 * NOTE(review): this WARN also fires on the -EAGAIN retry
		 * path, not only on hard failures.
		 */
		WARN_ON(ret != 0);
		/* The caller expects the exact id it asked for. */
		WARN_ON(!ret && newid != id);
		mtx_unlock(&rhp->lock);
	} while (ret == -EAGAIN);

	return ret;
}
|
||||
|
||||
/* Remove 'id' from the given id table under the device lock. */
static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
{
	mtx_lock(&rhp->lock);
	idr_remove(idr, id);
	mtx_unlock(&rhp->lock);
}
|
||||
|
||||
void iwch_ev_dispatch(struct iwch_dev *, struct mbuf *);
|
||||
void process_newconn(struct iw_cm_id *parent_cm_id, struct socket *child_so);
|
||||
#endif
|
File diff suppressed because it is too large
Load Diff
@ -1,248 +0,0 @@
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, 2008 Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
$FreeBSD$
|
||||
|
||||
***************************************************************************/
|
||||
|
||||
#ifndef _IWCH_CM_H_
|
||||
#define _IWCH_CM_H_
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/iw_cm.h>
|
||||
#include <sys/refcount.h>
|
||||
#include <sys/condvar.h>
|
||||
#include <sys/proc.h>
|
||||
|
||||
|
||||
#define MPA_KEY_REQ "MPA ID Req Frame"
|
||||
#define MPA_KEY_REP "MPA ID Rep Frame"
|
||||
|
||||
#define MPA_MAX_PRIVATE_DATA 256
|
||||
#define MPA_REV 0 /* XXX - amso1100 uses rev 0 ! */
|
||||
#define MPA_REJECT 0x20
|
||||
#define MPA_CRC 0x40
|
||||
#define MPA_MARKERS 0x80
|
||||
#define MPA_FLAGS_MASK 0xE0
|
||||
|
||||
/*
 * Endpoint reference counting helpers.  put_ep() drops a reference and
 * frees the endpoint when the count hits zero; get_ep() takes one.
 * Both trace the refcount as seen *before* the operation.
 *
 * Fix: wrapped in do { } while (0) so each expands as a single statement
 * and cannot mis-pair with an unbraced if/else at the call site (the old
 * bare { } form broke "if (x) put_ep(ep); else ...").
 */
#define put_ep(ep) do { \
	CTR4(KTR_IW_CXGB, "put_ep (via %s:%u) ep %p refcnt %d", __FUNCTION__, __LINE__,  \
	     ep, atomic_load_acq_int(&((ep)->refcount))); \
	if (refcount_release(&((ep)->refcount)))  \
		__free_ep(ep); \
} while (0)

#define get_ep(ep) do { \
	CTR4(KTR_IW_CXGB, "get_ep (via %s:%u) ep %p, refcnt %d", __FUNCTION__, __LINE__, \
	     ep, atomic_load_acq_int(&((ep)->refcount))); \
	refcount_acquire(&((ep)->refcount));	  \
} while (0)
|
||||
|
||||
/*
 * On-the-wire MPA start-up frame (key, flags, revision, then a
 * variable-length private-data payload).  Layout is wire format —
 * do not reorder or pad.
 */
struct mpa_message {
	u8 key[16];			/* MPA_KEY_REQ or MPA_KEY_REP */
	u8 flags;			/* MPA_REJECT/MPA_CRC/MPA_MARKERS */
	u8 revision;
	__be16 private_data_size;	/* bytes that follow, big-endian */
	u8 private_data[0];		/* trailing variable-length data */
};

/* On-the-wire iWARP TERMINATE message header. */
struct terminate_message {
	u8 layer_etype;			/* layer (high nibble) + error type */
	u8 ecode;			/* error code within the layer */
	__be16 hdrct_rsvd;
	u8 len_hdrs[0];			/* trailing copied headers */
};

/* Worst-case TERMINATE frame size (header + copied packet headers). */
#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
|
||||
|
||||
/*
 * Layer and error-type values carried in terminate_message::layer_etype:
 * the layer occupies the high nibble, the error type within that layer
 * the low nibble.
 */
enum iwch_layers_types {
	LAYER_RDMAP = 0x00,
	LAYER_DDP = 0x10,
	LAYER_MPA = 0x20,
	RDMAP_LOCAL_CATA = 0x00,
	RDMAP_REMOTE_PROT = 0x01,
	RDMAP_REMOTE_OP = 0x02,
	DDP_LOCAL_CATA = 0x00,
	DDP_TAGGED_ERR = 0x01,
	DDP_UNTAGGED_ERR = 0x02,
	DDP_LLP = 0x03
};

/* RDMAP-layer error codes (terminate_message::ecode). */
enum iwch_rdma_ecodes {
	RDMAP_INV_STAG = 0x00,
	RDMAP_BASE_BOUNDS = 0x01,
	RDMAP_ACC_VIOL = 0x02,
	RDMAP_STAG_NOT_ASSOC = 0x03,
	RDMAP_TO_WRAP = 0x04,
	RDMAP_INV_VERS = 0x05,
	RDMAP_INV_OPCODE = 0x06,
	RDMAP_STREAM_CATA = 0x07,
	RDMAP_GLOBAL_CATA = 0x08,
	RDMAP_CANT_INV_STAG = 0x09,
	RDMAP_UNSPECIFIED = 0xff
};

/* DDP-layer error codes (DDPT_* tagged, DDPU_* untagged). */
enum iwch_ddp_ecodes {
	DDPT_INV_STAG = 0x00,
	DDPT_BASE_BOUNDS = 0x01,
	DDPT_STAG_NOT_ASSOC = 0x02,
	DDPT_TO_WRAP = 0x03,
	DDPT_INV_VERS = 0x04,
	DDPU_INV_QN = 0x01,
	DDPU_INV_MSN_NOBUF = 0x02,
	DDPU_INV_MSN_RANGE = 0x03,
	DDPU_INV_MO = 0x04,
	DDPU_MSG_TOOBIG = 0x05,
	DDPU_INV_VERS = 0x06
};

/* MPA-layer error codes. */
enum iwch_mpa_ecodes {
	MPA_CRC_ERR = 0x02,
	MPA_MARKER_ERR = 0x03
};

/* Connection endpoint state machine states. */
enum iwch_ep_state {
	IDLE = 0,
	LISTEN,
	CONNECTING,
	MPA_REQ_WAIT,
	MPA_REQ_SENT,
	MPA_REQ_RCVD,
	MPA_REP_SENT,
	FPDU_MODE,		/* MPA negotiation done; RDMA traffic flows */
	ABORTING,
	CLOSING,
	MORIBUND,
	DEAD,
};

/* Endpoint flag bits (iwch_ep::flags). */
enum iwch_ep_flags {
	PEER_ABORT_IN_PROGRESS	= (1 << 0),
	ABORT_REQ_IN_PROGRESS	= (1 << 1),
};
|
||||
|
||||
/*
 * State shared by listening and connected endpoints: the iw_cm binding,
 * associated QP, addressing, per-endpoint lock/condvar for synchronous
 * reply waiting (see iwch_wait/iwch_wakeup), and the backing socket.
 */
struct iwch_ep_common {
	TAILQ_ENTRY(iwch_ep_common) entry;
	struct iw_cm_id *cm_id;
	struct iwch_qp *qp;
	struct toedev *tdev;
	enum iwch_ep_state state;
	u_int refcount;			/* managed via get_ep()/put_ep() */
	struct cv waitq;		/* signalled when rpl_done is set */
	struct mtx lock;		/* protects rpl_done/rpl_err waiting */
	struct sockaddr_in local_addr;
	struct sockaddr_in remote_addr;
	int rpl_err;
	int rpl_done;
	struct thread *thread;
	struct socket *so;
};

/* A passive (listening) endpoint. */
struct iwch_listen_ep {
	struct iwch_ep_common com;
	unsigned int stid;		/* server TID */
	int backlog;
};

/* An active (connected or connecting) endpoint. */
struct iwch_ep {
	struct iwch_ep_common com;
	struct iwch_ep *parent_ep;	/* listener we were spawned from */
	struct callout timer;
	unsigned int atid;		/* active-open TID */
	u32 hwtid;
	u32 snd_seq;
	u32 rcv_seq;
	struct l2t_entry *l2t;
	struct mbuf *mpa_mbuf;
	struct iwch_mpa_attributes mpa_attr;
	unsigned int mpa_pkt_len;	/* bytes of mpa_pkt accumulated */
	u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
	u8 tos;
	u16 emss;			/* effective MSS */
	u16 plen;			/* private data length */
	u32 ird;
	u32 ord;
	u32 flags;			/* enum iwch_ep_flags bits */
};
|
||||
|
||||
/* Recover the active endpoint stored in a cm_id's provider_data. */
static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

/* Recover the listening endpoint stored in a cm_id's provider_data. */
static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}
|
||||
|
||||
/*
 * Compute the smallest TCP window-scale shift such that a 16-bit window
 * (65535) shifted left by it can represent 'win', capped at 14.
 */
static inline int compute_wscale(int win)
{
	int shift;

	for (shift = 0; shift < 14; shift++) {
		if ((65535 << shift) >= win)
			break;
	}
	return shift;
}
}
|
||||
|
||||
static __inline void
|
||||
iwch_wait(struct cv *cv, struct mtx *lock, int *rpl_done)
|
||||
{
|
||||
mtx_lock(lock);
|
||||
if (!*rpl_done) {
|
||||
CTR0(KTR_IW_CXGB, "sleeping for rpl_done\n");
|
||||
cv_wait_unlock(cv, lock);
|
||||
}
|
||||
CTR1(KTR_IW_CXGB, "*rpl_done=%d\n", *rpl_done);
|
||||
}
|
||||
|
||||
/*
 * Mark the reply as done and wake every thread blocked in iwch_wait()
 * on the same lock/condvar pair.
 */
static __inline void
iwch_wakeup(struct cv *cv, struct mtx *lock, int *rpl_done)
{
	mtx_lock(lock);
	*rpl_done=1;
	CTR0(KTR_IW_CXGB, "wakeup for rpl_done\n");
	cv_broadcast(cv);
	mtx_unlock(lock);
}
|
||||
|
||||
/* CM prototypes */
|
||||
|
||||
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
|
||||
int iwch_create_listen_ep(struct iw_cm_id *cm_id, int backlog);
|
||||
void iwch_destroy_listen_ep(struct iw_cm_id *cm_id);
|
||||
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
|
||||
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
|
||||
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, int flags);
|
||||
void __free_ep(struct iwch_ep_common *ep);
|
||||
void iwch_rearp(struct iwch_ep *ep);
|
||||
int iwch_ep_redirect(void *ctx, struct rtentry *old, struct rtentry *new, struct l2t_entry *l2t);
|
||||
|
||||
int iwch_cm_init(void);
|
||||
void iwch_cm_term(void);
|
||||
void iwch_cm_init_cpl(struct adapter *);
|
||||
void iwch_cm_term_cpl(struct adapter *);
|
||||
|
||||
#endif /* _IWCH_CM_H_ */
|
@ -1,267 +0,0 @@
|
||||
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
***************************************************************************/
|
||||
#include <sys/cdefs.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#include "opt_inet.h"
|
||||
|
||||
#ifdef TCP_OFFLOAD
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
#include <sys/kernel.h>
|
||||
#include <sys/bus.h>
|
||||
#include <sys/pciio.h>
|
||||
#include <sys/conf.h>
|
||||
#include <machine/bus.h>
|
||||
#include <machine/resource.h>
|
||||
#include <sys/rman.h>
|
||||
#include <sys/ioccom.h>
|
||||
#include <sys/mbuf.h>
|
||||
#include <sys/mutex.h>
|
||||
#include <sys/rwlock.h>
|
||||
#include <sys/linker.h>
|
||||
#include <sys/firmware.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockio.h>
|
||||
#include <sys/smp.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/syslog.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/taskqueue.h>
|
||||
#include <sys/proc.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/libkern.h>
|
||||
|
||||
#include <netinet/in.h>
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/ib_umem.h>
|
||||
#include <rdma/ib_user_verbs.h>
|
||||
#include <linux/idr.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>
|
||||
|
||||
#include <cxgb_include.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_user.h>
|
||||
|
||||
/*
|
||||
* Get one cq entry from cxio and map it to openib.
|
||||
*
|
||||
* Returns:
|
||||
* 0 cqe returned
|
||||
* -ENOBUFS EMPTY;
|
||||
* -EAGAIN caller must try again
|
||||
* any other neg errno fatal error
|
||||
*/
|
||||
/*
 * Poll one CQE from the hardware CQ and translate it into an ib_wc.
 * Returns 1 with *wc filled, 0 if the CQ is empty, -EAGAIN if the CQE
 * consumed was not associated with a WR (caller polls again), or another
 * negative errno on an unrecognized opcode/status.
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
    struct ib_wc *wc)
{
	struct iwch_qp *qhp = NULL;
	struct t3_cqe cqe, *rd_cqe;
	struct t3_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie;
	int ret = 1;

	rd_cqe = cxio_next_cqe(&chp->cq);

	if (!rd_cqe)
		return 0;

	/* The CQE may name a QP we no longer track; poll with wq == NULL. */
	qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		/* Held across the CQE consumption; released at 'out'. */
		mtx_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
	    &credit);
	if (t3a_device(chp->rhp) && credit) {
		CTR3(KTR_IW_CXGB, "%s updating %d cq credits on id %d", __FUNCTION__,
		    credit, chp->cq.cqid);
		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
	}

	if (ret) {
		ret = -EAGAIN;
		goto out;
	}
	ret = 1;

	wc->wr_id = cookie;
	/*
	 * NOTE(review): if qhp was NULL above and cxio_poll_cq still
	 * returned 0, this dereferences a NULL qhp — confirm cxio_poll_cq
	 * never returns 0 when wq == NULL.
	 */
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(cqe);

	CTR4(KTR_IW_CXGB, "iwch_poll_cq_one qpid 0x%x type %d opcode %d status 0x%x",
	    CQE_QPID(cqe), CQE_TYPE(cqe),
	    CQE_OPCODE(cqe), CQE_STATUS(cqe));
	CTR3(KTR_IW_CXGB, "wrid hi 0x%x lo 0x%x cookie 0x%llx",
	    CQE_WRID_HI(cqe), CQE_WRID_LOW(cqe), (unsigned long long) cookie);

	/* CQE_TYPE 0 is a receive completion; otherwise map the SQ opcode. */
	if (CQE_TYPE(cqe) == 0) {
		if (!CQE_STATUS(cqe))
			wc->byte_len = CQE_LEN(cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
	} else {
		switch (CQE_OPCODE(cqe)) {
		case T3_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case T3_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(cqe);
			break;
		case T3_SEND:
		case T3_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case T3_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;

		/* these aren't supported yet */
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
		case T3_LOCAL_INV:
		case T3_FAST_REGISTER:
		default:
			log(LOG_ERR, "Unexpected opcode %d "
			    "in the CQE received for QPID=0x%0x\n",
			    CQE_OPCODE(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	/* Map the hardware completion status to an ib_wc status. */
	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(cqe)) {
		case TPT_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case TPT_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case TPT_ERR_QPID:
		case TPT_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case TPT_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case TPT_ERR_INVALIDATE_SHARED_MR:
		case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case TPT_ERR_CRC:
		case TPT_ERR_MARKER:
		case TPT_ERR_PDU_LEN_ERR:
		case TPT_ERR_OUT_OF_RQE:
		case TPT_ERR_DDP_VERSION:
		case TPT_ERR_RDMA_VERSION:
		case TPT_ERR_DDP_QUEUE_NUM:
		case TPT_ERR_MSN:
		case TPT_ERR_TBIT:
		case TPT_ERR_MO:
		case TPT_ERR_MSN_RANGE:
		case TPT_ERR_IRD_OVERFLOW:
		case TPT_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case TPT_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			log(LOG_ERR, "Unexpected cqe_status 0x%x for "
			    "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		mtx_unlock(&qhp->lock);
	return ret;
}
|
||||
|
||||
/*
 * ib_poll_cq entry point: harvest up to num_entries completions into wc[].
 * Returns the number of completions written, or a negative errno on a
 * fatal polling error.
 */
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	int npolled;
	int err = 0;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;

	mtx_lock(&chp->lock);
	for (npolled = 0; npolled < num_entries; ++npolled) {
#ifdef DEBUG
		int i=0;
#endif

		/*
		 * Because T3 can post CQEs that are _not_ associated
		 * with a WR, we might have to poll again after removing
		 * one of these.
		 */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + npolled);
#ifdef DEBUG
			/* Guard against an endless -EAGAIN loop. */
			PANIC_IF(++i > 1000);
#endif
		} while (err == -EAGAIN);
		/* 0 = CQ empty, negative = error; either way stop. */
		if (err <= 0)
			break;
	}
	mtx_unlock(&chp->lock);

	if (err < 0) {
		return err;
	} else {
		return npolled;
	}
}
|
||||
#endif
|
@ -1,277 +0,0 @@
|
||||
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
***************************************************************************/
|
||||
#include <sys/cdefs.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#include "opt_inet.h"
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
#include <sys/kernel.h>
|
||||
#include <sys/bus.h>
|
||||
#include <sys/pciio.h>
|
||||
#include <sys/conf.h>
|
||||
#include <machine/bus.h>
|
||||
#include <machine/resource.h>
|
||||
#include <sys/rman.h>
|
||||
#include <sys/ioccom.h>
|
||||
#include <sys/mbuf.h>
|
||||
#include <sys/mutex.h>
|
||||
#include <sys/rwlock.h>
|
||||
#include <sys/linker.h>
|
||||
#include <sys/firmware.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockio.h>
|
||||
#include <sys/smp.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/syslog.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/taskqueue.h>
|
||||
#include <sys/proc.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/libkern.h>
|
||||
|
||||
#include <netinet/in.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/ib_umem.h>
|
||||
#include <rdma/ib_user_verbs.h>
|
||||
#include <linux/idr.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>
|
||||
|
||||
#if defined(INVARIANTS) && defined(TCP_OFFLOAD)
|
||||
#include <cxgb_include.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_user.h>
|
||||
|
||||
static int
|
||||
cxio_rdma_get_mem(struct cxio_rdev *rdev, struct ch_mem_range *m)
|
||||
{
|
||||
struct adapter *sc = rdev->adap;
|
||||
struct mc7 *mem;
|
||||
|
||||
if ((m->addr & 7) || (m->len & 7))
|
||||
return (EINVAL);
|
||||
if (m->mem_id == MEM_CM)
|
||||
mem = &sc->cm;
|
||||
else if (m->mem_id == MEM_PMRX)
|
||||
mem = &sc->pmrx;
|
||||
else if (m->mem_id == MEM_PMTX)
|
||||
mem = &sc->pmtx;
|
||||
else
|
||||
return (EINVAL);
|
||||
|
||||
return (t3_mc7_bd_read(mem, m->addr/8, m->len/8, (u64 *)m->buf));
|
||||
}
|
||||
|
||||
void cxio_dump_tpt(struct cxio_rdev *rdev, uint32_t stag)
|
||||
{
|
||||
struct ch_mem_range m;
|
||||
u64 *data;
|
||||
u32 addr;
|
||||
int rc;
|
||||
int size = 32;
|
||||
|
||||
m.buf = malloc(size, M_DEVBUF, M_NOWAIT);
|
||||
if (m.buf == NULL) {
|
||||
CTR1(KTR_IW_CXGB, "%s couldn't allocate memory.", __FUNCTION__);
|
||||
return;
|
||||
}
|
||||
m.mem_id = MEM_PMRX;
|
||||
m.addr = (stag >> 8) * 32 + rdev->rnic_info.tpt_base;
|
||||
m.len = size;
|
||||
CTR3(KTR_IW_CXGB, "%s TPT addr 0x%x len %d", __FUNCTION__, m.addr, m.len);
|
||||
|
||||
rc = cxio_rdma_get_mem(rdev, &m);
|
||||
if (rc) {
|
||||
CTR2(KTR_IW_CXGB, "%s toectl returned error %d", __FUNCTION__, rc);
|
||||
free(m.buf, M_DEVBUF);
|
||||
return;
|
||||
}
|
||||
|
||||
data = (u64 *)m.buf;
|
||||
addr = m.addr;
|
||||
while (size > 0) {
|
||||
CTR2(KTR_IW_CXGB, "TPT %08x: %016llx", addr, (unsigned long long) *data);
|
||||
size -= 8;
|
||||
data++;
|
||||
addr += 8;
|
||||
}
|
||||
free(m.buf, M_DEVBUF);
|
||||
}
|
||||
|
||||
void cxio_dump_pbl(struct cxio_rdev *rdev, uint32_t pbl_addr, uint32_t len, u8 shift)
|
||||
{
|
||||
struct ch_mem_range m;
|
||||
u64 *data;
|
||||
u32 addr;
|
||||
int rc;
|
||||
int size, npages;
|
||||
|
||||
shift += 12;
|
||||
npages = (len + (1ULL << shift) - 1) >> shift;
|
||||
size = npages * sizeof(u64);
|
||||
m.buf = malloc(size, M_DEVBUF, M_NOWAIT);
|
||||
if (m.buf == NULL) {
|
||||
CTR1(KTR_IW_CXGB, "%s couldn't allocate memory.", __FUNCTION__);
|
||||
return;
|
||||
}
|
||||
m.mem_id = MEM_PMRX;
|
||||
m.addr = pbl_addr;
|
||||
m.len = size;
|
||||
CTR4(KTR_IW_CXGB, "%s PBL addr 0x%x len %d depth %d",
|
||||
__FUNCTION__, m.addr, m.len, npages);
|
||||
|
||||
rc = cxio_rdma_get_mem(rdev, &m);
|
||||
if (rc) {
|
||||
CTR2(KTR_IW_CXGB, "%s toectl returned error %d", __FUNCTION__, rc);
|
||||
free(m.buf, M_DEVBUF);
|
||||
return;
|
||||
}
|
||||
|
||||
data = (u64 *)m.buf;
|
||||
addr = m.addr;
|
||||
while (size > 0) {
|
||||
CTR2(KTR_IW_CXGB, "PBL %08x: %016llx", addr, (unsigned long long) *data);
|
||||
size -= 8;
|
||||
data++;
|
||||
addr += 8;
|
||||
}
|
||||
free(m.buf, M_DEVBUF);
|
||||
}
|
||||
|
||||
void cxio_dump_wqe(union t3_wr *wqe)
|
||||
{
|
||||
uint64_t *data = (uint64_t *)wqe;
|
||||
uint32_t size = (uint32_t)(be64toh(*data) & 0xff);
|
||||
|
||||
if (size == 0)
|
||||
size = 8;
|
||||
while (size > 0) {
|
||||
CTR2(KTR_IW_CXGB, "WQE %p: %016llx", data,
|
||||
(unsigned long long) be64toh(*data));
|
||||
size--;
|
||||
data++;
|
||||
}
|
||||
}
|
||||
|
||||
void cxio_dump_wce(struct t3_cqe *wce)
|
||||
{
|
||||
uint64_t *data = (uint64_t *)wce;
|
||||
int size = sizeof(*wce);
|
||||
|
||||
while (size > 0) {
|
||||
CTR2(KTR_IW_CXGB, "WCE %p: %016llx", data,
|
||||
(unsigned long long) be64toh(*data));
|
||||
size -= 8;
|
||||
data++;
|
||||
}
|
||||
}
|
||||
|
||||
void cxio_dump_rqt(struct cxio_rdev *rdev, uint32_t hwtid, int nents)
|
||||
{
|
||||
struct ch_mem_range m;
|
||||
int size = nents * 64;
|
||||
u64 *data;
|
||||
u32 addr;
|
||||
int rc;
|
||||
|
||||
m.buf = malloc(size, M_DEVBUF, M_NOWAIT);
|
||||
if (m.buf == NULL) {
|
||||
CTR1(KTR_IW_CXGB, "%s couldn't allocate memory.", __FUNCTION__);
|
||||
return;
|
||||
}
|
||||
m.mem_id = MEM_PMRX;
|
||||
m.addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
|
||||
m.len = size;
|
||||
CTR3(KTR_IW_CXGB, "%s RQT addr 0x%x len %d", __FUNCTION__, m.addr, m.len);
|
||||
|
||||
rc = cxio_rdma_get_mem(rdev, &m);
|
||||
if (rc) {
|
||||
CTR2(KTR_IW_CXGB, "%s toectl returned error %d", __FUNCTION__, rc);
|
||||
free(m.buf, M_DEVBUF);
|
||||
return;
|
||||
}
|
||||
|
||||
data = (u64 *)m.buf;
|
||||
addr = m.addr;
|
||||
while (size > 0) {
|
||||
CTR2(KTR_IW_CXGB, "RQT %08x: %016llx", addr, (unsigned long long) *data);
|
||||
size -= 8;
|
||||
data++;
|
||||
addr += 8;
|
||||
}
|
||||
free(m.buf, M_DEVBUF);
|
||||
}
|
||||
|
||||
void cxio_dump_tcb(struct cxio_rdev *rdev, uint32_t hwtid)
|
||||
{
|
||||
struct ch_mem_range m;
|
||||
int size = TCB_SIZE;
|
||||
uint32_t *data;
|
||||
uint32_t addr;
|
||||
int rc;
|
||||
|
||||
m.buf = malloc(size, M_DEVBUF, M_NOWAIT);
|
||||
if (m.buf == NULL) {
|
||||
CTR1(KTR_IW_CXGB, "%s couldn't allocate memory.", __FUNCTION__);
|
||||
return;
|
||||
}
|
||||
m.mem_id = MEM_CM;
|
||||
m.addr = hwtid * size;
|
||||
m.len = size;
|
||||
CTR3(KTR_IW_CXGB, "%s TCB %d len %d", __FUNCTION__, m.addr, m.len);
|
||||
|
||||
rc = cxio_rdma_get_mem(rdev, &m);
|
||||
if (rc) {
|
||||
CTR2(KTR_IW_CXGB, "%s toectl returned error %d", __FUNCTION__, rc);
|
||||
free(m.buf, M_DEVBUF);
|
||||
return;
|
||||
}
|
||||
|
||||
data = (uint32_t *)m.buf;
|
||||
addr = m.addr;
|
||||
while (size > 0) {
|
||||
printf("%2u: %08x %08x %08x %08x %08x %08x %08x %08x\n",
|
||||
addr,
|
||||
*(data+2), *(data+3), *(data),*(data+1),
|
||||
*(data+6), *(data+7), *(data+4), *(data+5));
|
||||
size -= 32;
|
||||
data += 8;
|
||||
addr += 32;
|
||||
}
|
||||
free(m.buf, M_DEVBUF);
|
||||
}
|
||||
#endif
|
@ -1,261 +0,0 @@
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
***************************************************************************/
|
||||
#include <sys/cdefs.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#include "opt_inet.h"
|
||||
|
||||
#ifdef TCP_OFFLOAD
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
#include <sys/kernel.h>
|
||||
#include <sys/bus.h>
|
||||
#include <sys/pciio.h>
|
||||
#include <sys/conf.h>
|
||||
#include <machine/bus.h>
|
||||
#include <machine/resource.h>
|
||||
#include <sys/rman.h>
|
||||
#include <sys/ioccom.h>
|
||||
#include <sys/mbuf.h>
|
||||
#include <sys/mutex.h>
|
||||
#include <sys/rwlock.h>
|
||||
#include <sys/linker.h>
|
||||
#include <sys/firmware.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockio.h>
|
||||
#include <sys/smp.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/syslog.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/taskqueue.h>
|
||||
#include <sys/proc.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/libkern.h>
|
||||
|
||||
#include <netinet/in.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/ib_umem.h>
|
||||
#include <rdma/ib_user_verbs.h>
|
||||
#include <linux/idr.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>
|
||||
|
||||
#include <cxgb_include.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_user.h>
|
||||
|
||||
static void
|
||||
post_qp_event(struct iwch_dev *rnicp, struct iwch_qp *qhp, struct iwch_cq *chp,
|
||||
struct respQ_msg_t *rsp_msg,
|
||||
enum ib_event_type ib_event,
|
||||
int send_term)
|
||||
{
|
||||
struct ib_event event;
|
||||
struct iwch_qp_attributes attrs;
|
||||
|
||||
mtx_lock(&rnicp->lock);
|
||||
|
||||
if (!qhp) {
|
||||
CTR3(KTR_IW_CXGB, "%s unaffiliated error 0x%x qpid 0x%x\n",
|
||||
__func__, CQE_STATUS(rsp_msg->cqe),
|
||||
CQE_QPID(rsp_msg->cqe));
|
||||
mtx_unlock(&rnicp->lock);
|
||||
return;
|
||||
}
|
||||
|
||||
if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
|
||||
(qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
|
||||
CTR4(KTR_IW_CXGB, "%s AE received after RTS - "
|
||||
"qp state %d qpid 0x%x status 0x%x", __FUNCTION__,
|
||||
qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
|
||||
mtx_unlock(&rnicp->lock);
|
||||
return;
|
||||
}
|
||||
|
||||
log(LOG_ERR, "%s - AE qpid 0x%x opcode %d status 0x%x "
|
||||
"type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
|
||||
CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
|
||||
CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
|
||||
CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
|
||||
|
||||
mtx_unlock(&rnicp->lock);
|
||||
|
||||
if (qhp->attr.state == IWCH_QP_STATE_RTS) {
|
||||
attrs.next_state = IWCH_QP_STATE_TERMINATE;
|
||||
iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
|
||||
&attrs, 1);
|
||||
if (send_term)
|
||||
iwch_post_terminate(qhp, rsp_msg);
|
||||
}
|
||||
|
||||
event.event = ib_event;
|
||||
event.device = chp->ibcq.device;
|
||||
if (ib_event == IB_EVENT_CQ_ERR)
|
||||
event.element.cq = &chp->ibcq;
|
||||
else
|
||||
event.element.qp = &qhp->ibqp;
|
||||
|
||||
if (qhp->ibqp.event_handler)
|
||||
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
|
||||
|
||||
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
|
||||
}
|
||||
|
||||
void
|
||||
iwch_ev_dispatch(struct iwch_dev *rnicp, struct mbuf *m)
|
||||
{
|
||||
struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) m->m_data;
|
||||
struct iwch_cq *chp;
|
||||
struct iwch_qp *qhp;
|
||||
u32 cqid = RSPQ_CQID(rsp_msg);
|
||||
|
||||
mtx_lock(&rnicp->lock);
|
||||
chp = get_chp(rnicp, cqid);
|
||||
qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
|
||||
if (!chp || !qhp) {
|
||||
log(LOG_ERR,"BAD AE cqid 0x%x qpid 0x%x opcode %d "
|
||||
"status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x \n",
|
||||
cqid, CQE_QPID(rsp_msg->cqe),
|
||||
CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
|
||||
CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
|
||||
CQE_WRID_LOW(rsp_msg->cqe));
|
||||
mtx_unlock(&rnicp->lock);
|
||||
return;
|
||||
}
|
||||
iwch_qp_add_ref(&qhp->ibqp);
|
||||
mtx_lock(&chp->lock);
|
||||
++chp->refcnt;
|
||||
mtx_unlock(&chp->lock);
|
||||
mtx_unlock(&rnicp->lock);
|
||||
|
||||
/*
|
||||
* 1) completion of our sending a TERMINATE.
|
||||
* 2) incoming TERMINATE message.
|
||||
*/
|
||||
if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
|
||||
(CQE_STATUS(rsp_msg->cqe) == 0)) {
|
||||
if (SQ_TYPE(rsp_msg->cqe)) {
|
||||
CTR3(KTR_IW_CXGB, "%s QPID 0x%x ep %p disconnecting",
|
||||
__FUNCTION__, qhp->wq.qpid, qhp->ep);
|
||||
iwch_ep_disconnect(qhp->ep, 0, M_NOWAIT);
|
||||
} else {
|
||||
CTR2(KTR_IW_CXGB, "%s post REQ_ERR AE QPID 0x%x", __FUNCTION__,
|
||||
qhp->wq.qpid);
|
||||
post_qp_event(rnicp, qhp, chp, rsp_msg,
|
||||
IB_EVENT_QP_REQ_ERR, 0);
|
||||
iwch_ep_disconnect(qhp->ep, 0, M_NOWAIT);
|
||||
}
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Bad incoming Read request */
|
||||
if (SQ_TYPE(rsp_msg->cqe) &&
|
||||
(CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
|
||||
post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Bad incoming write */
|
||||
if (RQ_TYPE(rsp_msg->cqe) &&
|
||||
(CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
|
||||
post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
|
||||
goto done;
|
||||
}
|
||||
|
||||
switch (CQE_STATUS(rsp_msg->cqe)) {
|
||||
|
||||
/* Completion Events */
|
||||
case TPT_ERR_SUCCESS:
|
||||
#if 0
|
||||
/*
|
||||
* Confirm the destination entry if this is a RECV completion.
|
||||
*/
|
||||
if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
|
||||
dst_confirm(qhp->ep->dst);
|
||||
#endif
|
||||
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
|
||||
break;
|
||||
|
||||
case TPT_ERR_STAG:
|
||||
case TPT_ERR_PDID:
|
||||
case TPT_ERR_QPID:
|
||||
case TPT_ERR_ACCESS:
|
||||
case TPT_ERR_WRAP:
|
||||
case TPT_ERR_BOUND:
|
||||
case TPT_ERR_INVALIDATE_SHARED_MR:
|
||||
case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
|
||||
post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
|
||||
break;
|
||||
|
||||
/* Device Fatal Errors */
|
||||
case TPT_ERR_ECC:
|
||||
case TPT_ERR_ECC_PSTAG:
|
||||
case TPT_ERR_INTERNAL_ERR:
|
||||
post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
|
||||
break;
|
||||
|
||||
/* QP Fatal Errors */
|
||||
case TPT_ERR_OUT_OF_RQE:
|
||||
case TPT_ERR_PBL_ADDR_BOUND:
|
||||
case TPT_ERR_CRC:
|
||||
case TPT_ERR_MARKER:
|
||||
case TPT_ERR_PDU_LEN_ERR:
|
||||
case TPT_ERR_DDP_VERSION:
|
||||
case TPT_ERR_RDMA_VERSION:
|
||||
case TPT_ERR_OPCODE:
|
||||
case TPT_ERR_DDP_QUEUE_NUM:
|
||||
case TPT_ERR_MSN:
|
||||
case TPT_ERR_TBIT:
|
||||
case TPT_ERR_MO:
|
||||
case TPT_ERR_MSN_GAP:
|
||||
case TPT_ERR_MSN_RANGE:
|
||||
case TPT_ERR_RQE_ADDR_BOUND:
|
||||
case TPT_ERR_IRD_OVERFLOW:
|
||||
post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
|
||||
break;
|
||||
|
||||
default:
|
||||
log(LOG_ERR,"Unknown T3 status 0x%x QPID 0x%x\n",
|
||||
CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
|
||||
post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
|
||||
break;
|
||||
}
|
||||
done:
|
||||
mtx_lock(&chp->lock);
|
||||
if (--chp->refcnt == 0)
|
||||
wakeup(chp);
|
||||
mtx_unlock(&chp->lock);
|
||||
iwch_qp_rem_ref(&qhp->ibqp);
|
||||
}
|
||||
#endif
|
File diff suppressed because it is too large
Load Diff
@ -1,274 +0,0 @@
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, 2008 Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
$FreeBSD$
|
||||
|
||||
***************************************************************************/
|
||||
#ifndef __CXIO_HAL_H__
|
||||
#define __CXIO_HAL_H__
|
||||
#include <sys/condvar.h>
|
||||
#include <sys/ktr.h>
|
||||
|
||||
#define T3_CTRL_QP_ID FW_RI_SGEEC_START
|
||||
#define T3_CTL_QP_TID FW_RI_TID_START
|
||||
#define T3_CTRL_QP_SIZE_LOG2 8
|
||||
#define T3_CTRL_CQ_ID 0
|
||||
|
||||
/* TBD */
|
||||
#define T3_MAX_NUM_RI (1<<15)
|
||||
#define T3_MAX_NUM_QP (1<<15)
|
||||
#define T3_MAX_NUM_CQ (1<<15)
|
||||
#define T3_MAX_NUM_PD (1<<15)
|
||||
#define T3_MAX_PBL_SIZE 256
|
||||
#define T3_MAX_RQ_SIZE 1024
|
||||
#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
|
||||
#define T3_MAX_CQ_DEPTH 65536
|
||||
#define T3_MAX_NUM_STAG (1<<15)
|
||||
#define T3_MAX_MR_SIZE 0x100000000ULL
|
||||
#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
|
||||
|
||||
#define T3_STAG_UNSET 0xffffffff
|
||||
|
||||
#define T3_MAX_DEV_NAME_LEN 32
|
||||
|
||||
struct cxio_hal_ctrl_qp {
|
||||
u32 wptr;
|
||||
u32 rptr;
|
||||
struct mtx lock; /* for the wtpr, can sleep */
|
||||
union t3_wr *workq; /* the work request queue */
|
||||
bus_addr_t dma_addr; /* pci bus address of the workq */
|
||||
void *doorbell;
|
||||
};
|
||||
|
||||
struct cxio_hal_resource {
|
||||
struct buf_ring *tpt_fifo;
|
||||
struct mtx tpt_fifo_lock;
|
||||
struct buf_ring *qpid_fifo;
|
||||
struct mtx qpid_fifo_lock;
|
||||
struct buf_ring *cqid_fifo;
|
||||
struct mtx cqid_fifo_lock;
|
||||
struct buf_ring *pdid_fifo;
|
||||
struct mtx pdid_fifo_lock;
|
||||
};
|
||||
|
||||
struct cxio_qpid {
|
||||
TAILQ_ENTRY(cxio_qpid) entry;
|
||||
u32 qpid;
|
||||
};
|
||||
|
||||
struct cxio_ucontext {
|
||||
TAILQ_HEAD(, cxio_qpid) qpids;
|
||||
struct mtx lock;
|
||||
};
|
||||
|
||||
struct cxio_rdev {
|
||||
struct adapter *adap;
|
||||
struct rdma_info rnic_info;
|
||||
struct cxio_hal_resource *rscp;
|
||||
struct cxio_hal_ctrl_qp ctrl_qp;
|
||||
unsigned long qpshift;
|
||||
u32 qpnr;
|
||||
u32 qpmask;
|
||||
struct cxio_ucontext uctx;
|
||||
struct gen_pool *pbl_pool;
|
||||
struct gen_pool *rqt_pool;
|
||||
struct ifnet *ifp;
|
||||
TAILQ_ENTRY(cxio_rdev) entry;
|
||||
};
|
||||
|
||||
static __inline int
|
||||
cxio_num_stags(struct cxio_rdev *rdev_p)
|
||||
{
|
||||
return min((int)T3_MAX_NUM_STAG, (int)((rdev_p->rnic_info.tpt_top - rdev_p->rnic_info.tpt_base) >> 5));
|
||||
}
|
||||
|
||||
typedef void (*cxio_hal_ev_callback_func_t) (struct cxio_rdev * rdev_p,
|
||||
struct mbuf * m);
|
||||
|
||||
#define RSPQ_CQID(rsp) (be32toh(rsp->cq_ptrid) & 0xffff)
|
||||
#define RSPQ_CQPTR(rsp) ((be32toh(rsp->cq_ptrid) >> 16) & 0xffff)
|
||||
#define RSPQ_GENBIT(rsp) ((be32toh(rsp->flags) >> 16) & 1)
|
||||
#define RSPQ_OVERFLOW(rsp) ((be32toh(rsp->flags) >> 17) & 1)
|
||||
#define RSPQ_AN(rsp) ((be32toh(rsp->flags) >> 18) & 1)
|
||||
#define RSPQ_SE(rsp) ((be32toh(rsp->flags) >> 19) & 1)
|
||||
#define RSPQ_NOTIFY(rsp) ((be32toh(rsp->flags) >> 20) & 1)
|
||||
#define RSPQ_CQBRANCH(rsp) ((be32toh(rsp->flags) >> 21) & 1)
|
||||
#define RSPQ_CREDIT_THRESH(rsp) ((be32toh(rsp->flags) >> 22) & 1)
|
||||
|
||||
struct respQ_msg_t {
|
||||
__be32 flags; /* flit 0 */
|
||||
__be32 cq_ptrid;
|
||||
__be64 rsvd; /* flit 1 */
|
||||
struct t3_cqe cqe; /* flits 2-3 */
|
||||
};
|
||||
|
||||
enum t3_cq_opcode {
|
||||
CQ_ARM_AN = 0x2,
|
||||
CQ_ARM_SE = 0x6,
|
||||
CQ_FORCE_AN = 0x3,
|
||||
CQ_CREDIT_UPDATE = 0x7
|
||||
};
|
||||
|
||||
int cxio_rdev_open(struct cxio_rdev *rdev);
|
||||
void cxio_rdev_close(struct cxio_rdev *rdev);
|
||||
int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
|
||||
enum t3_cq_opcode op, u32 credit);
|
||||
int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
|
||||
int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
|
||||
void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
|
||||
void cxio_init_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
|
||||
int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
|
||||
struct cxio_ucontext *uctx);
|
||||
int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
|
||||
struct cxio_ucontext *uctx);
|
||||
int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
|
||||
int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
|
||||
u32 pbl_addr, u32 pbl_size);
|
||||
int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
|
||||
enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
|
||||
u8 page_size, u32 pbl_size, u32 pbl_addr);
|
||||
int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
|
||||
enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
|
||||
u8 page_size, u32 pbl_size, u32 pbl_addr);
|
||||
int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
|
||||
u32 pbl_addr);
|
||||
int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
|
||||
int cxio_deallocate_window(struct cxio_rdev *rdev, u32 stag);
|
||||
int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr,
|
||||
struct socket *so);
|
||||
u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
|
||||
void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
|
||||
int cxio_hal_init(struct adapter *);
|
||||
void cxio_hal_uninit(struct adapter *);
|
||||
void cxio_hal_exit(void);
|
||||
int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
|
||||
int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
|
||||
void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
|
||||
void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
|
||||
void cxio_flush_hw_cq(struct t3_cq *cq);
|
||||
int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
|
||||
u8 *cqe_flushed, u64 *cookie, u32 *credit);
|
||||
|
||||
#define MOD "iw_cxgb: "
|
||||
|
||||
#ifdef INVARIANTS
|
||||
void cxio_dump_tpt(struct cxio_rdev *rev, u32 stag);
|
||||
void cxio_dump_pbl(struct cxio_rdev *rev, u32 pbl_addr, uint32_t len, u8 shift);
|
||||
void cxio_dump_wqe(union t3_wr *wqe);
|
||||
void cxio_dump_wce(struct t3_cqe *wce);
|
||||
void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents);
|
||||
void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid);
|
||||
#endif
|
||||
|
||||
#define cxfree(a) free((a), M_DEVBUF);
|
||||
|
||||
#include <sys/blist.h>
|
||||
struct gen_pool {
|
||||
blist_t gen_list;
|
||||
daddr_t gen_base;
|
||||
int gen_chunk_shift;
|
||||
struct mtx gen_lock;
|
||||
};
|
||||
|
||||
static __inline struct gen_pool *
|
||||
gen_pool_create(daddr_t base, u_int chunk_shift, u_int len)
|
||||
{
|
||||
struct gen_pool *gp;
|
||||
|
||||
gp = malloc(sizeof(struct gen_pool), M_DEVBUF, M_NOWAIT);
|
||||
if (gp == NULL)
|
||||
return (NULL);
|
||||
|
||||
memset(gp, 0, sizeof(struct gen_pool));
|
||||
gp->gen_list = blist_create(len >> chunk_shift, M_NOWAIT);
|
||||
if (gp->gen_list == NULL) {
|
||||
free(gp, M_DEVBUF);
|
||||
return (NULL);
|
||||
}
|
||||
blist_free(gp->gen_list, 0, len >> chunk_shift);
|
||||
gp->gen_base = base;
|
||||
gp->gen_chunk_shift = chunk_shift;
|
||||
mtx_init(&gp->gen_lock, "genpool", NULL, MTX_DUPOK|MTX_DEF);
|
||||
|
||||
return (gp);
|
||||
}
|
||||
|
||||
static __inline unsigned long
|
||||
gen_pool_alloc(struct gen_pool *gp, int size)
|
||||
{
|
||||
int chunks;
|
||||
daddr_t blkno;
|
||||
|
||||
chunks = (size + (1<<gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift;
|
||||
mtx_lock(&gp->gen_lock);
|
||||
blkno = blist_alloc(gp->gen_list, chunks);
|
||||
mtx_unlock(&gp->gen_lock);
|
||||
|
||||
if (blkno == SWAPBLK_NONE)
|
||||
return (0);
|
||||
|
||||
return (gp->gen_base + ((1 << gp->gen_chunk_shift) * blkno));
|
||||
}
|
||||
|
||||
static __inline void
|
||||
gen_pool_free(struct gen_pool *gp, daddr_t address, int size)
|
||||
{
|
||||
int chunks;
|
||||
daddr_t blkno;
|
||||
|
||||
chunks = (size + (1<<gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift;
|
||||
blkno = (address - gp->gen_base) / (1 << gp->gen_chunk_shift);
|
||||
mtx_lock(&gp->gen_lock);
|
||||
blist_free(gp->gen_list, blkno, chunks);
|
||||
mtx_unlock(&gp->gen_lock);
|
||||
}
|
||||
|
||||
static __inline void
|
||||
gen_pool_destroy(struct gen_pool *gp)
|
||||
{
|
||||
blist_destroy(gp->gen_list);
|
||||
free(gp, M_DEVBUF);
|
||||
}
|
||||
|
||||
#define cxio_wait(ctx, lockp, cond) \
|
||||
({ \
|
||||
int __ret = 0; \
|
||||
mtx_lock(lockp); \
|
||||
while (!cond) { \
|
||||
msleep(ctx, lockp, 0, "cxio_wait", hz); \
|
||||
if (SIGPENDING(curthread)) { \
|
||||
__ret = ERESTART; \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
mtx_unlock(lockp); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define KTR_IW_CXGB KTR_SPARE3
|
||||
|
||||
#endif
|
@ -1,22 +0,0 @@
|
||||
#ifndef __IB_INTFC_H__
|
||||
#define __IB_INTFC_H__
|
||||
|
||||
/* $FreeBSD$ */
|
||||
|
||||
#undef prefetch
|
||||
#undef WARN_ON
|
||||
#undef max_t
|
||||
#undef udelay
|
||||
#undef le32_to_cpu
|
||||
#undef le16_to_cpu
|
||||
#undef cpu_to_le32
|
||||
#undef swab32
|
||||
#undef container_of
|
||||
|
||||
#undef LIST_HEAD
|
||||
#define LIST_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *lh_first; /* first element */ \
|
||||
}
|
||||
|
||||
#endif /* __IB_INTFC_H__ */
|
@ -1,239 +0,0 @@
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
***************************************************************************/
|
||||
#include <sys/cdefs.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#include "opt_inet.h"
|
||||
|
||||
#ifdef TCP_OFFLOAD
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
#include <sys/kernel.h>
|
||||
#include <sys/bus.h>
|
||||
#include <sys/pciio.h>
|
||||
#include <sys/conf.h>
|
||||
#include <machine/bus.h>
|
||||
#include <machine/resource.h>
|
||||
#include <sys/rman.h>
|
||||
#include <sys/ioccom.h>
|
||||
#include <sys/mbuf.h>
|
||||
#include <sys/mutex.h>
|
||||
#include <sys/rwlock.h>
|
||||
#include <sys/linker.h>
|
||||
#include <sys/firmware.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockio.h>
|
||||
#include <sys/smp.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/syslog.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/taskqueue.h>
|
||||
#include <sys/proc.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/libkern.h>
|
||||
|
||||
#include <netinet/in.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/ib_umem.h>
|
||||
#include <rdma/ib_user_verbs.h>
|
||||
#include <linux/idr.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>
|
||||
|
||||
#include <cxgb_include.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_user.h>
|
||||
|
||||
static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
|
||||
{
|
||||
u32 mmid;
|
||||
|
||||
mhp->attr.state = 1;
|
||||
mhp->attr.stag = stag;
|
||||
mmid = stag >> 8;
|
||||
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
|
||||
CTR3(KTR_IW_CXGB, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
|
||||
return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
|
||||
}
|
||||
|
||||
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
|
||||
struct iwch_mr *mhp,
|
||||
int shift)
|
||||
{
|
||||
u32 stag;
|
||||
int ret;
|
||||
|
||||
if (cxio_register_phys_mem(&rhp->rdev,
|
||||
&stag, mhp->attr.pdid,
|
||||
mhp->attr.perms,
|
||||
mhp->attr.zbva,
|
||||
mhp->attr.va_fbo,
|
||||
mhp->attr.len,
|
||||
shift - 12,
|
||||
mhp->attr.pbl_size, mhp->attr.pbl_addr))
|
||||
return (-ENOMEM);
|
||||
|
||||
ret = iwch_finish_mem_reg(mhp, stag);
|
||||
if (ret)
|
||||
cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
|
||||
mhp->attr.pbl_addr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
|
||||
struct iwch_mr *mhp,
|
||||
int shift,
|
||||
int npages)
|
||||
{
|
||||
u32 stag;
|
||||
int ret;
|
||||
|
||||
/* We could support this... */
|
||||
if (npages > mhp->attr.pbl_size)
|
||||
return (-ENOMEM);
|
||||
|
||||
stag = mhp->attr.stag;
|
||||
if (cxio_reregister_phys_mem(&rhp->rdev,
|
||||
&stag, mhp->attr.pdid,
|
||||
mhp->attr.perms,
|
||||
mhp->attr.zbva,
|
||||
mhp->attr.va_fbo,
|
||||
mhp->attr.len,
|
||||
shift - 12,
|
||||
mhp->attr.pbl_size, mhp->attr.pbl_addr))
|
||||
return (-ENOMEM);
|
||||
|
||||
ret = iwch_finish_mem_reg(mhp, stag);
|
||||
if (ret)
|
||||
cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
|
||||
mhp->attr.pbl_addr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
|
||||
{
|
||||
mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
|
||||
npages << 3);
|
||||
|
||||
if (!mhp->attr.pbl_addr)
|
||||
return -ENOMEM;
|
||||
|
||||
mhp->attr.pbl_size = npages;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void iwch_free_pbl(struct iwch_mr *mhp)
|
||||
{
|
||||
cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
|
||||
mhp->attr.pbl_size << 3);
|
||||
}
|
||||
|
||||
int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
|
||||
{
|
||||
return cxio_write_pbl(&mhp->rhp->rdev, pages,
|
||||
mhp->attr.pbl_addr + (offset << 3), npages);
|
||||
}
|
||||
|
||||
int build_phys_page_list(struct ib_phys_buf *buffer_list,
|
||||
int num_phys_buf,
|
||||
u64 *iova_start,
|
||||
u64 *total_size,
|
||||
int *npages,
|
||||
int *shift,
|
||||
__be64 **page_list)
|
||||
{
|
||||
u64 mask;
|
||||
int i, j, n;
|
||||
|
||||
mask = 0;
|
||||
*total_size = 0;
|
||||
for (i = 0; i < num_phys_buf; ++i) {
|
||||
if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
|
||||
return (-EINVAL);
|
||||
if (i != 0 && i != num_phys_buf - 1 &&
|
||||
(buffer_list[i].size & ~PAGE_MASK))
|
||||
return (-EINVAL);
|
||||
*total_size += buffer_list[i].size;
|
||||
if (i > 0)
|
||||
mask |= buffer_list[i].addr;
|
||||
else
|
||||
mask |= buffer_list[i].addr & PAGE_MASK;
|
||||
if (i != num_phys_buf - 1)
|
||||
mask |= buffer_list[i].addr + buffer_list[i].size;
|
||||
else
|
||||
mask |= (buffer_list[i].addr + buffer_list[i].size +
|
||||
PAGE_SIZE - 1) & PAGE_MASK;
|
||||
}
|
||||
|
||||
if (*total_size > 0xFFFFFFFFULL)
|
||||
return (-ENOMEM);
|
||||
|
||||
/* Find largest page shift we can use to cover buffers */
|
||||
for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
|
||||
if ((1ULL << *shift) & mask)
|
||||
break;
|
||||
|
||||
buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
|
||||
buffer_list[0].addr &= ~0ull << *shift;
|
||||
|
||||
*npages = 0;
|
||||
for (i = 0; i < num_phys_buf; ++i)
|
||||
*npages += (buffer_list[i].size +
|
||||
(1ULL << *shift) - 1) >> *shift;
|
||||
|
||||
if (!*npages)
|
||||
return (-EINVAL);
|
||||
|
||||
*page_list = kmalloc(sizeof(u64) * *npages, M_NOWAIT);
|
||||
if (!*page_list)
|
||||
return (-ENOMEM);
|
||||
|
||||
n = 0;
|
||||
for (i = 0; i < num_phys_buf; ++i)
|
||||
for (j = 0;
|
||||
j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
|
||||
++j)
|
||||
(*page_list)[n++] = htobe64(buffer_list[i].addr +
|
||||
((u64) j << *shift));
|
||||
|
||||
CTR6(KTR_IW_CXGB, "%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d",
|
||||
__FUNCTION__, (unsigned long long) *iova_start,
|
||||
(unsigned long long) mask, *shift, (unsigned long long) *total_size,
|
||||
*npages);
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
#endif
|
File diff suppressed because it is too large
Load Diff
@ -1,362 +0,0 @@
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, 2008 Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
$FreeBSD$
|
||||
|
||||
***************************************************************************/
|
||||
#ifndef __IWCH_PROVIDER_H__
|
||||
#define __IWCH_PROVIDER_H__
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
|
||||
struct iwch_pd {
|
||||
struct ib_pd ibpd;
|
||||
u32 pdid;
|
||||
struct iwch_dev *rhp;
|
||||
};
|
||||
|
||||
#ifndef container_of
|
||||
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
|
||||
#endif
|
||||
static __inline struct iwch_pd *
|
||||
to_iwch_pd(struct ib_pd *ibpd)
|
||||
{
|
||||
return container_of(ibpd, struct iwch_pd, ibpd);
|
||||
}
|
||||
|
||||
struct tpt_attributes {
|
||||
u32 stag;
|
||||
u32 state:1;
|
||||
u32 type:2;
|
||||
u32 rsvd:1;
|
||||
enum tpt_mem_perm perms;
|
||||
u32 remote_invaliate_disable:1;
|
||||
u32 zbva:1;
|
||||
u32 mw_bind_enable:1;
|
||||
u32 page_size:5;
|
||||
|
||||
u32 pdid;
|
||||
u32 qpid;
|
||||
u32 pbl_addr;
|
||||
u32 len;
|
||||
u64 va_fbo;
|
||||
u32 pbl_size;
|
||||
};
|
||||
|
||||
struct iwch_mr {
|
||||
struct ib_mr ibmr;
|
||||
struct ib_umem *umem;
|
||||
struct iwch_dev *rhp;
|
||||
u64 kva;
|
||||
struct tpt_attributes attr;
|
||||
};
|
||||
|
||||
typedef struct iwch_mw iwch_mw_handle;
|
||||
|
||||
static __inline struct iwch_mr *
|
||||
to_iwch_mr(struct ib_mr *ibmr)
|
||||
{
|
||||
return container_of(ibmr, struct iwch_mr, ibmr);
|
||||
}
|
||||
|
||||
struct iwch_mw {
|
||||
struct ib_mw ibmw;
|
||||
struct iwch_dev *rhp;
|
||||
u64 kva;
|
||||
struct tpt_attributes attr;
|
||||
};
|
||||
|
||||
static __inline struct iwch_mw *
|
||||
to_iwch_mw(struct ib_mw *ibmw)
|
||||
{
|
||||
return container_of(ibmw, struct iwch_mw, ibmw);
|
||||
}
|
||||
|
||||
struct iwch_cq {
|
||||
struct ib_cq ibcq;
|
||||
struct iwch_dev *rhp;
|
||||
struct t3_cq cq;
|
||||
struct mtx lock;
|
||||
int refcnt;
|
||||
u32 /* __user */ *user_rptr_addr;
|
||||
};
|
||||
|
||||
static __inline struct iwch_cq *
|
||||
to_iwch_cq(struct ib_cq *ibcq)
|
||||
{
|
||||
return container_of(ibcq, struct iwch_cq, ibcq);
|
||||
}
|
||||
|
||||
enum IWCH_QP_FLAGS {
|
||||
QP_QUIESCED = 0x01
|
||||
};
|
||||
|
||||
struct iwch_mpa_attributes {
|
||||
u8 initiator;
|
||||
u8 recv_marker_enabled;
|
||||
u8 xmit_marker_enabled; /* iWARP: enable inbound Read Resp. */
|
||||
u8 crc_enabled;
|
||||
u8 version; /* 0 or 1 */
|
||||
};
|
||||
|
||||
struct iwch_qp_attributes {
|
||||
u32 scq;
|
||||
u32 rcq;
|
||||
u32 sq_num_entries;
|
||||
u32 rq_num_entries;
|
||||
u32 sq_max_sges;
|
||||
u32 sq_max_sges_rdma_write;
|
||||
u32 rq_max_sges;
|
||||
u32 state;
|
||||
u8 enable_rdma_read;
|
||||
u8 enable_rdma_write; /* enable inbound Read Resp. */
|
||||
u8 enable_bind;
|
||||
u8 enable_mmid0_fastreg; /* Enable STAG0 + Fast-register */
|
||||
/*
|
||||
* Next QP state. If specify the current state, only the
|
||||
* QP attributes will be modified.
|
||||
*/
|
||||
u32 max_ord;
|
||||
u32 max_ird;
|
||||
u32 pd; /* IN */
|
||||
u32 next_state;
|
||||
char terminate_buffer[52];
|
||||
u32 terminate_msg_len;
|
||||
u8 is_terminate_local;
|
||||
struct iwch_mpa_attributes mpa_attr; /* IN-OUT */
|
||||
struct iwch_ep *llp_stream_handle;
|
||||
char *stream_msg_buf; /* Last stream msg. before Idle -> RTS */
|
||||
u32 stream_msg_buf_len; /* Only on Idle -> RTS */
|
||||
};
|
||||
|
||||
struct iwch_qp {
|
||||
struct ib_qp ibqp;
|
||||
struct iwch_dev *rhp;
|
||||
struct iwch_ep *ep;
|
||||
struct iwch_qp_attributes attr;
|
||||
struct t3_wq wq;
|
||||
struct mtx lock;
|
||||
int refcnt;
|
||||
enum IWCH_QP_FLAGS flags;
|
||||
struct callout timer;
|
||||
};
|
||||
|
||||
static __inline int
|
||||
qp_quiesced(struct iwch_qp *qhp)
|
||||
{
|
||||
return qhp->flags & QP_QUIESCED;
|
||||
}
|
||||
|
||||
static __inline struct iwch_qp *
|
||||
to_iwch_qp(struct ib_qp *ibqp)
|
||||
{
|
||||
return container_of(ibqp, struct iwch_qp, ibqp);
|
||||
}
|
||||
|
||||
void iwch_qp_add_ref(struct ib_qp *qp);
|
||||
void iwch_qp_rem_ref(struct ib_qp *qp);
|
||||
|
||||
struct iwch_ucontext {
|
||||
struct ib_ucontext ibucontext;
|
||||
struct cxio_ucontext uctx;
|
||||
u32 key;
|
||||
struct mtx mmap_lock;
|
||||
TAILQ_HEAD( ,iwch_mm_entry) mmaps;
|
||||
};
|
||||
|
||||
static __inline struct iwch_ucontext *
|
||||
to_iwch_ucontext(struct ib_ucontext *c)
|
||||
{
|
||||
return container_of(c, struct iwch_ucontext, ibucontext);
|
||||
}
|
||||
|
||||
struct iwch_mm_entry {
|
||||
TAILQ_ENTRY(iwch_mm_entry) entry;
|
||||
u64 addr;
|
||||
u32 key;
|
||||
unsigned len;
|
||||
};
|
||||
|
||||
static __inline struct iwch_mm_entry *
|
||||
remove_mmap(struct iwch_ucontext *ucontext,
|
||||
u32 key, unsigned len)
|
||||
{
|
||||
struct iwch_mm_entry *tmp, *mm;
|
||||
|
||||
mtx_lock(&ucontext->mmap_lock);
|
||||
TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
|
||||
if (mm->key == key && mm->len == len) {
|
||||
TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
|
||||
mtx_unlock(&ucontext->mmap_lock);
|
||||
CTR4(KTR_IW_CXGB, "%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
|
||||
key, (unsigned long long) mm->addr, mm->len);
|
||||
return mm;
|
||||
}
|
||||
}
|
||||
mtx_unlock(&ucontext->mmap_lock);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static __inline void
|
||||
insert_mmap(struct iwch_ucontext *ucontext,
|
||||
struct iwch_mm_entry *mm)
|
||||
{
|
||||
mtx_lock(&ucontext->mmap_lock);
|
||||
CTR4(KTR_IW_CXGB, "%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
|
||||
mm->key, (unsigned long long) mm->addr, mm->len);
|
||||
TAILQ_INSERT_TAIL(&ucontext->mmaps, mm, entry);
|
||||
mtx_unlock(&ucontext->mmap_lock);
|
||||
}
|
||||
|
||||
enum iwch_qp_attr_mask {
|
||||
IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
|
||||
IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
|
||||
IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
|
||||
IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
|
||||
IWCH_QP_ATTR_MAX_ORD = 1 << 11,
|
||||
IWCH_QP_ATTR_MAX_IRD = 1 << 12,
|
||||
IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
|
||||
IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
|
||||
IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
|
||||
IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
|
||||
IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
|
||||
IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
|
||||
IWCH_QP_ATTR_MAX_ORD |
|
||||
IWCH_QP_ATTR_MAX_IRD |
|
||||
IWCH_QP_ATTR_LLP_STREAM_HANDLE |
|
||||
IWCH_QP_ATTR_STREAM_MSG_BUFFER |
|
||||
IWCH_QP_ATTR_MPA_ATTR |
|
||||
IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
|
||||
};
|
||||
|
||||
int iwch_modify_qp(struct iwch_dev *rhp,
|
||||
struct iwch_qp *qhp,
|
||||
enum iwch_qp_attr_mask mask,
|
||||
struct iwch_qp_attributes *attrs,
|
||||
int internal);
|
||||
|
||||
enum iwch_qp_state {
|
||||
IWCH_QP_STATE_IDLE,
|
||||
IWCH_QP_STATE_RTS,
|
||||
IWCH_QP_STATE_ERROR,
|
||||
IWCH_QP_STATE_TERMINATE,
|
||||
IWCH_QP_STATE_CLOSING,
|
||||
IWCH_QP_STATE_TOT
|
||||
};
|
||||
|
||||
static __inline int
|
||||
iwch_convert_state(enum ib_qp_state ib_state)
|
||||
{
|
||||
switch (ib_state) {
|
||||
case IB_QPS_RESET:
|
||||
case IB_QPS_INIT:
|
||||
return IWCH_QP_STATE_IDLE;
|
||||
case IB_QPS_RTS:
|
||||
return IWCH_QP_STATE_RTS;
|
||||
case IB_QPS_SQD:
|
||||
return IWCH_QP_STATE_CLOSING;
|
||||
case IB_QPS_SQE:
|
||||
return IWCH_QP_STATE_TERMINATE;
|
||||
case IB_QPS_ERR:
|
||||
return IWCH_QP_STATE_ERROR;
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
static __inline u32
|
||||
iwch_ib_to_tpt_access(int acc)
|
||||
{
|
||||
return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
|
||||
(acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
|
||||
(acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
|
||||
TPT_LOCAL_READ;
|
||||
}
|
||||
|
||||
static __inline u32
|
||||
iwch_ib_to_mwbind_access(int acc)
|
||||
{
|
||||
return (acc & IB_ACCESS_REMOTE_WRITE ? T3_MEM_ACCESS_REM_WRITE : 0) |
|
||||
(acc & IB_ACCESS_REMOTE_READ ? T3_MEM_ACCESS_REM_READ : 0) |
|
||||
(acc & IB_ACCESS_LOCAL_WRITE ? T3_MEM_ACCESS_LOCAL_WRITE : 0) |
|
||||
T3_MEM_ACCESS_LOCAL_READ;
|
||||
}
|
||||
|
||||
enum iwch_mmid_state {
|
||||
IWCH_STAG_STATE_VALID,
|
||||
IWCH_STAG_STATE_INVALID
|
||||
};
|
||||
|
||||
enum iwch_qp_query_flags {
|
||||
IWCH_QP_QUERY_CONTEXT_NONE = 0x0, /* No ctx; Only attrs */
|
||||
IWCH_QP_QUERY_CONTEXT_GET = 0x1, /* Get ctx + attrs */
|
||||
IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2, /* Not Supported */
|
||||
|
||||
/*
|
||||
* Quiesce QP context; Consumer
|
||||
* will NOT replay outstanding WR
|
||||
*/
|
||||
IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
|
||||
IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
|
||||
IWCH_QP_QUERY_TEST_USERWRITE = 0x32 /* Test special */
|
||||
};
|
||||
|
||||
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||
struct ib_send_wr **bad_wr);
|
||||
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
||||
struct ib_recv_wr **bad_wr);
|
||||
int iwch_bind_mw(struct ib_qp *qp,
|
||||
struct ib_mw *mw,
|
||||
struct ib_mw_bind *mw_bind);
|
||||
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
|
||||
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
|
||||
int iwch_register_device(struct iwch_dev *dev);
|
||||
void iwch_unregister_device(struct iwch_dev *dev);
|
||||
void stop_read_rep_timer(struct iwch_qp *qhp);
|
||||
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
|
||||
struct iwch_mr *mhp,
|
||||
int shift);
|
||||
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
|
||||
struct iwch_mr *mhp,
|
||||
int shift,
|
||||
int npages);
|
||||
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
|
||||
void iwch_free_pbl(struct iwch_mr *mhp);
|
||||
int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
|
||||
int build_phys_page_list(struct ib_phys_buf *buffer_list,
|
||||
int num_phys_buf,
|
||||
u64 *iova_start,
|
||||
u64 *total_size,
|
||||
int *npages,
|
||||
int *shift,
|
||||
__be64 **page_list);
|
||||
|
||||
|
||||
#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"
|
||||
|
||||
#endif
|
File diff suppressed because it is too large
Load Diff
@ -1,375 +0,0 @@
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
***************************************************************************/
|
||||
#include <sys/cdefs.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#include "opt_inet.h"
|
||||
|
||||
#ifdef TCP_OFFLOAD
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
#include <sys/kernel.h>
|
||||
#include <sys/bus.h>
|
||||
#include <sys/pciio.h>
|
||||
#include <sys/conf.h>
|
||||
#include <machine/bus.h>
|
||||
#include <machine/resource.h>
|
||||
#include <sys/rman.h>
|
||||
#include <sys/ioccom.h>
|
||||
#include <sys/mbuf.h>
|
||||
#include <sys/mutex.h>
|
||||
#include <sys/rwlock.h>
|
||||
#include <sys/linker.h>
|
||||
#include <sys/firmware.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockio.h>
|
||||
#include <sys/smp.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/syslog.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/taskqueue.h>
|
||||
#include <sys/proc.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/libkern.h>
|
||||
|
||||
#include <netinet/in.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/ib_umem.h>
|
||||
#include <rdma/ib_user_verbs.h>
|
||||
#include <linux/idr.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>
|
||||
|
||||
#include <cxgb_include.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
|
||||
#include <ulp/iw_cxgb/iw_cxgb_user.h>
|
||||
|
||||
#ifdef needed
|
||||
static struct buf_ring *rhdl_fifo;
|
||||
static struct mtx rhdl_fifo_lock;
|
||||
#endif
|
||||
|
||||
#define RANDOM_SIZE 16
|
||||
|
||||
static int __cxio_init_resource_fifo(struct buf_ring **fifo,
|
||||
struct mtx *fifo_lock,
|
||||
u32 nr, u32 skip_low,
|
||||
u32 skip_high,
|
||||
int randomize)
|
||||
{
|
||||
u32 i, j, idx;
|
||||
u32 random_bytes;
|
||||
u32 rarray[16];
|
||||
mtx_init(fifo_lock, "cxio fifo", NULL, MTX_DEF|MTX_DUPOK);
|
||||
|
||||
*fifo = buf_ring_alloc(nr, M_DEVBUF, M_NOWAIT, fifo_lock);
|
||||
if (*fifo == NULL)
|
||||
return (-ENOMEM);
|
||||
#if 0
|
||||
for (i = 0; i < skip_low + skip_high; i++) {
|
||||
u32 entry = 0;
|
||||
|
||||
buf_ring_enqueue(*fifo, (uintptr_t) entry);
|
||||
}
|
||||
#endif
|
||||
if (randomize) {
|
||||
j = 0;
|
||||
random_bytes = random();
|
||||
for (i = 0; i < RANDOM_SIZE; i++)
|
||||
rarray[i] = i + skip_low;
|
||||
for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
|
||||
if (j >= RANDOM_SIZE) {
|
||||
j = 0;
|
||||
random_bytes = random();
|
||||
}
|
||||
idx = (random_bytes >> (j * 2)) & 0xF;
|
||||
buf_ring_enqueue(*fifo, (void *)(uintptr_t)rarray[idx]);
|
||||
rarray[idx] = i;
|
||||
j++;
|
||||
}
|
||||
for (i = 0; i < RANDOM_SIZE; i++)
|
||||
buf_ring_enqueue(*fifo, (void *) (uintptr_t)rarray[i]);
|
||||
} else
|
||||
for (i = skip_low; i < nr - skip_high; i++)
|
||||
buf_ring_enqueue(*fifo, (void *) (uintptr_t)i);
|
||||
#if 0
|
||||
for (i = 0; i < skip_low + skip_high; i++)
|
||||
buf_ring_dequeue_sc(*fifo);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cxio_init_resource_fifo(struct buf_ring **fifo, struct mtx * fifo_lock,
|
||||
u32 nr, u32 skip_low, u32 skip_high)
|
||||
{
|
||||
return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
|
||||
skip_high, 0));
|
||||
}
|
||||
|
||||
static int cxio_init_resource_fifo_random(struct buf_ring **fifo,
|
||||
struct mtx * fifo_lock,
|
||||
u32 nr, u32 skip_low, u32 skip_high)
|
||||
{
|
||||
|
||||
return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
|
||||
skip_high, 1));
|
||||
}
|
||||
|
||||
static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
mtx_init(&rdev_p->rscp->qpid_fifo_lock, "qpid fifo", NULL, MTX_DEF);
|
||||
|
||||
rdev_p->rscp->qpid_fifo = buf_ring_alloc(T3_MAX_NUM_QP, M_DEVBUF,
|
||||
M_NOWAIT, &rdev_p->rscp->qpid_fifo_lock);
|
||||
if (rdev_p->rscp->qpid_fifo == NULL)
|
||||
return (-ENOMEM);
|
||||
|
||||
for (i = 16; i < T3_MAX_NUM_QP; i++)
|
||||
if (!(i & rdev_p->qpmask))
|
||||
buf_ring_enqueue(rdev_p->rscp->qpid_fifo, (void *) (uintptr_t)i);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef needed
|
||||
int cxio_hal_init_rhdl_resource(u32 nr_rhdl)
|
||||
{
|
||||
return cxio_init_resource_fifo(&rhdl_fifo, &rhdl_fifo_lock, nr_rhdl, 1,
|
||||
0);
|
||||
}
|
||||
|
||||
void cxio_hal_destroy_rhdl_resource(void)
|
||||
{
|
||||
buf_ring_free(rhdl_fifo, M_DEVBUF);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* nr_* must be power of 2 */
|
||||
int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
|
||||
u32 nr_tpt, u32 nr_pbl,
|
||||
u32 nr_rqt, u32 nr_qpid, u32 nr_cqid, u32 nr_pdid)
|
||||
{
|
||||
int err = 0;
|
||||
struct cxio_hal_resource *rscp;
|
||||
|
||||
rscp = malloc(sizeof(*rscp), M_DEVBUF, M_NOWAIT|M_ZERO);
|
||||
if (!rscp)
|
||||
return (-ENOMEM);
|
||||
rdev_p->rscp = rscp;
|
||||
err = cxio_init_resource_fifo_random(&rscp->tpt_fifo,
|
||||
&rscp->tpt_fifo_lock,
|
||||
nr_tpt, 1, 0);
|
||||
if (err)
|
||||
goto tpt_err;
|
||||
err = cxio_init_qpid_fifo(rdev_p);
|
||||
if (err)
|
||||
goto qpid_err;
|
||||
err = cxio_init_resource_fifo(&rscp->cqid_fifo, &rscp->cqid_fifo_lock,
|
||||
nr_cqid, 1, 0);
|
||||
if (err)
|
||||
goto cqid_err;
|
||||
err = cxio_init_resource_fifo(&rscp->pdid_fifo, &rscp->pdid_fifo_lock,
|
||||
nr_pdid, 1, 0);
|
||||
if (err)
|
||||
goto pdid_err;
|
||||
return 0;
|
||||
pdid_err:
|
||||
buf_ring_free(rscp->cqid_fifo, M_DEVBUF);
|
||||
cqid_err:
|
||||
buf_ring_free(rscp->qpid_fifo, M_DEVBUF);
|
||||
qpid_err:
|
||||
buf_ring_free(rscp->tpt_fifo, M_DEVBUF);
|
||||
tpt_err:
|
||||
return (-ENOMEM);
|
||||
}
|
||||
|
||||
/*
|
||||
* returns 0 if no resource available
|
||||
*/
|
||||
static u32 cxio_hal_get_resource(struct buf_ring *fifo, struct mtx *lock)
|
||||
{
|
||||
u32 entry;
|
||||
|
||||
mtx_lock(lock);
|
||||
entry = (u32)(uintptr_t)buf_ring_dequeue_sc(fifo);
|
||||
mtx_unlock(lock);
|
||||
return entry;
|
||||
}
|
||||
|
||||
static void cxio_hal_put_resource(struct buf_ring *fifo, u32 entry, struct mtx *lock)
|
||||
{
|
||||
mtx_lock(lock);
|
||||
buf_ring_enqueue(fifo, (void *) (uintptr_t)entry);
|
||||
mtx_unlock(lock);
|
||||
}
|
||||
|
||||
u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
|
||||
{
|
||||
return cxio_hal_get_resource(rscp->tpt_fifo, &rscp->tpt_fifo_lock);
|
||||
}
|
||||
|
||||
void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
|
||||
{
|
||||
cxio_hal_put_resource(rscp->tpt_fifo, stag, &rscp->tpt_fifo_lock);
|
||||
}
|
||||
|
||||
u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
|
||||
{
|
||||
u32 qpid = cxio_hal_get_resource(rscp->qpid_fifo, &rscp->qpid_fifo_lock);
|
||||
CTR2(KTR_IW_CXGB, "%s qpid 0x%x", __FUNCTION__, qpid);
|
||||
return qpid;
|
||||
}
|
||||
|
||||
void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
|
||||
{
|
||||
CTR2(KTR_IW_CXGB, "%s qpid 0x%x", __FUNCTION__, qpid);
|
||||
cxio_hal_put_resource(rscp->qpid_fifo, qpid, &rscp->qpid_fifo_lock);
|
||||
}
|
||||
|
||||
u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp)
|
||||
{
|
||||
return cxio_hal_get_resource(rscp->cqid_fifo, &rscp->cqid_fifo_lock);
|
||||
}
|
||||
|
||||
void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid)
|
||||
{
|
||||
cxio_hal_put_resource(rscp->cqid_fifo, cqid, &rscp->cqid_fifo_lock);
|
||||
}
|
||||
|
||||
u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp)
|
||||
{
|
||||
return cxio_hal_get_resource(rscp->pdid_fifo, &rscp->pdid_fifo_lock);
|
||||
}
|
||||
|
||||
void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid)
|
||||
{
|
||||
cxio_hal_put_resource(rscp->pdid_fifo, pdid, &rscp->pdid_fifo_lock);
|
||||
}
|
||||
|
||||
void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
|
||||
{
|
||||
buf_ring_free(rscp->tpt_fifo, M_DEVBUF);
|
||||
buf_ring_free(rscp->cqid_fifo, M_DEVBUF);
|
||||
buf_ring_free(rscp->qpid_fifo, M_DEVBUF);
|
||||
buf_ring_free(rscp->pdid_fifo, M_DEVBUF);
|
||||
free(rscp, M_DEVBUF);
|
||||
}
|
||||
|
||||
/*
|
||||
* PBL Memory Manager. Uses Linux generic allocator.
|
||||
*/
|
||||
|
||||
#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
|
||||
#define PBL_CHUNK 2*1024*1024
|
||||
|
||||
u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
|
||||
{
|
||||
unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
|
||||
CTR3(KTR_IW_CXGB, "%s addr 0x%x size %d", __FUNCTION__, (u32)addr, size);
|
||||
return (u32)addr;
|
||||
}
|
||||
|
||||
void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
|
||||
{
|
||||
CTR3(KTR_IW_CXGB, "%s addr 0x%x size %d", __FUNCTION__, addr, size);
|
||||
gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
|
||||
}
|
||||
|
||||
int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
|
||||
{
|
||||
|
||||
rdev_p->pbl_pool = gen_pool_create(rdev_p->rnic_info.pbl_base, MIN_PBL_SHIFT,
|
||||
rdev_p->rnic_info.pbl_top - rdev_p->rnic_info.pbl_base);
|
||||
#if 0
|
||||
if (rdev_p->pbl_pool) {
|
||||
|
||||
unsigned long i;
|
||||
for (i = rdev_p->rnic_info.pbl_base;
|
||||
i <= rdev_p->rnic_info.pbl_top - PBL_CHUNK + 1;
|
||||
i += PBL_CHUNK)
|
||||
gen_pool_add(rdev_p->pbl_pool, i, PBL_CHUNK, -1);
|
||||
}
|
||||
#endif
|
||||
return rdev_p->pbl_pool ? 0 : (-ENOMEM);
|
||||
}
|
||||
|
||||
void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
|
||||
{
|
||||
gen_pool_destroy(rdev_p->pbl_pool);
|
||||
}
|
||||
|
||||
/*
|
||||
* RQT Memory Manager. Uses Linux generic allocator.
|
||||
*/
|
||||
|
||||
#define MIN_RQT_SHIFT 10 /* 1KB == mini RQT size (16 entries) */
|
||||
#define RQT_CHUNK 2*1024*1024
|
||||
|
||||
u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
|
||||
{
|
||||
unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
|
||||
CTR3(KTR_IW_CXGB, "%s addr 0x%x size %d", __FUNCTION__, (u32)addr, size << 6);
|
||||
return (u32)addr;
|
||||
}
|
||||
|
||||
void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
|
||||
{
|
||||
CTR3(KTR_IW_CXGB, "%s addr 0x%x size %d", __FUNCTION__, addr, size << 6);
|
||||
gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
|
||||
}
|
||||
|
||||
int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p)
|
||||
{
|
||||
|
||||
rdev_p->rqt_pool = gen_pool_create(rdev_p->rnic_info.rqt_base,
|
||||
MIN_RQT_SHIFT, rdev_p->rnic_info.rqt_top - rdev_p->rnic_info.rqt_base);
|
||||
#if 0
|
||||
if (rdev_p->rqt_pool) {
|
||||
unsigned long i;
|
||||
|
||||
for (i = rdev_p->rnic_info.rqt_base;
|
||||
i <= rdev_p->rnic_info.rqt_top - RQT_CHUNK + 1;
|
||||
i += RQT_CHUNK)
|
||||
gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1);
|
||||
}
|
||||
#endif
|
||||
return rdev_p->rqt_pool ? 0 : (-ENOMEM);
|
||||
}
|
||||
|
||||
void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p)
|
||||
{
|
||||
gen_pool_destroy(rdev_p->rqt_pool);
|
||||
}
|
||||
#endif
|
@ -1,59 +0,0 @@
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, 2008 Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
$FreeBSD$
|
||||
|
||||
***************************************************************************/
|
||||
#ifndef __CXIO_RESOURCE_H__
|
||||
#define __CXIO_RESOURCE_H__
|
||||
|
||||
extern int cxio_hal_init_rhdl_resource(u32 nr_rhdl);
|
||||
extern void cxio_hal_destroy_rhdl_resource(void);
|
||||
extern int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
|
||||
u32 nr_tpt, u32 nr_pbl,
|
||||
u32 nr_rqt, u32 nr_qpid, u32 nr_cqid,
|
||||
u32 nr_pdid);
|
||||
extern u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp);
|
||||
extern void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag);
|
||||
extern u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp);
|
||||
extern void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid);
|
||||
extern u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp);
|
||||
extern void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid);
|
||||
extern void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp);
|
||||
|
||||
#define PBL_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.pbl_base )
|
||||
extern int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p);
|
||||
extern void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p);
|
||||
extern u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size);
|
||||
extern void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
|
||||
|
||||
#define RQT_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.rqt_base )
|
||||
extern int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p);
|
||||
extern void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p);
|
||||
extern u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size);
|
||||
extern void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
|
||||
#endif
|
@ -1,76 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2007, 2008 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
#ifndef __IWCH_USER_H__
|
||||
#define __IWCH_USER_H__
|
||||
|
||||
#define IWCH_UVERBS_ABI_VERSION 1
|
||||
|
||||
/*
|
||||
* Make sure that all structs defined in this file remain laid out so
|
||||
* that they pack the same way on 32-bit and 64-bit architectures (to
|
||||
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
|
||||
* In particular do not use pointer types -- pass pointers in uint64_t
|
||||
* instead.
|
||||
*/
|
||||
struct iwch_create_cq_req {
|
||||
uint64_t user_rptr_addr;
|
||||
};
|
||||
|
||||
struct iwch_create_cq_resp_v0 {
|
||||
__u64 key;
|
||||
__u32 cqid;
|
||||
__u32 size_log2;
|
||||
};
|
||||
|
||||
struct iwch_create_cq_resp {
|
||||
uint64_t key;
|
||||
uint32_t cqid;
|
||||
uint32_t size_log2;
|
||||
__u32 memsize;
|
||||
__u32 reserved;
|
||||
};
|
||||
|
||||
struct iwch_create_qp_resp {
|
||||
uint64_t key;
|
||||
uint64_t db_key;
|
||||
uint32_t qpid;
|
||||
uint32_t size_log2;
|
||||
uint32_t sq_size_log2;
|
||||
uint32_t rq_size_log2;
|
||||
};
|
||||
|
||||
struct iwch_reg_user_mr_resp {
|
||||
uint32_t pbl_addr;
|
||||
};
|
||||
#endif
|
@ -1,729 +0,0 @@
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, 2008 Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
$FreeBSD$
|
||||
|
||||
***************************************************************************/
|
||||
#ifndef __CXIO_WR_H__
|
||||
#define __CXIO_WR_H__
|
||||
#define T3_MAX_SGE 4
|
||||
#define T3_MAX_INLINE 64
|
||||
#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
|
||||
#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
|
||||
#define T3_STAG0_PAGE_SHIFT 15
|
||||
|
||||
#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
|
||||
#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
|
||||
((rptr)!=(wptr)) )
|
||||
#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
|
||||
#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
|
||||
#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
|
||||
#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
|
||||
|
||||
static __inline void
|
||||
ring_doorbell(void /* __iomem */ *doorbell, u32 qpid)
|
||||
{
|
||||
writel(doorbell, ((1<<31) | qpid));
|
||||
}
|
||||
|
||||
#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
|
||||
|
||||
enum t3_wr_flags {
|
||||
T3_COMPLETION_FLAG = 0x01,
|
||||
T3_NOTIFY_FLAG = 0x02,
|
||||
T3_SOLICITED_EVENT_FLAG = 0x04,
|
||||
T3_READ_FENCE_FLAG = 0x08,
|
||||
T3_LOCAL_FENCE_FLAG = 0x10
|
||||
} __attribute__ ((packed));
|
||||
|
||||
enum t3_wr_opcode {
|
||||
T3_WR_BP = FW_WROPCODE_RI_BYPASS,
|
||||
T3_WR_SEND = FW_WROPCODE_RI_SEND,
|
||||
T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
|
||||
T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
|
||||
T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
|
||||
T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
|
||||
T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
|
||||
T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
|
||||
T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP
|
||||
} __attribute__ ((packed));
|
||||
|
||||
enum t3_rdma_opcode {
|
||||
T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
|
||||
T3_READ_REQ,
|
||||
T3_READ_RESP,
|
||||
T3_SEND,
|
||||
T3_SEND_WITH_INV,
|
||||
T3_SEND_WITH_SE,
|
||||
T3_SEND_WITH_SE_INV,
|
||||
T3_TERMINATE,
|
||||
T3_RDMA_INIT, /* CHELSIO RI specific ... */
|
||||
T3_BIND_MW,
|
||||
T3_FAST_REGISTER,
|
||||
T3_LOCAL_INV,
|
||||
T3_QP_MOD,
|
||||
T3_BYPASS
|
||||
} __attribute__ ((packed));
|
||||
|
||||
static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
|
||||
{
|
||||
switch (wrop) {
|
||||
case T3_WR_BP: return T3_BYPASS;
|
||||
case T3_WR_SEND: return T3_SEND;
|
||||
case T3_WR_WRITE: return T3_RDMA_WRITE;
|
||||
case T3_WR_READ: return T3_READ_REQ;
|
||||
case T3_WR_INV_STAG: return T3_LOCAL_INV;
|
||||
case T3_WR_BIND: return T3_BIND_MW;
|
||||
case T3_WR_INIT: return T3_RDMA_INIT;
|
||||
case T3_WR_QP_MOD: return T3_QP_MOD;
|
||||
default: break;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* Work request id */
|
||||
union t3_wrid {
|
||||
struct {
|
||||
u32 hi;
|
||||
u32 low;
|
||||
} id0;
|
||||
u64 id1;
|
||||
};
|
||||
|
||||
#define WRID(wrid) (wrid.id1)
|
||||
#define WRID_GEN(wrid) (wrid.id0.wr_gen)
|
||||
#define WRID_IDX(wrid) (wrid.id0.wr_idx)
|
||||
#define WRID_LO(wrid) (wrid.id0.wr_lo)
|
||||
|
||||
struct fw_riwrh {
|
||||
__be32 op_seop_flags;
|
||||
__be32 gen_tid_len;
|
||||
};
|
||||
|
||||
#define S_FW_RIWR_OP 24
|
||||
#define M_FW_RIWR_OP 0xff
|
||||
#define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)
|
||||
#define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
|
||||
|
||||
#define S_FW_RIWR_SOPEOP 22
|
||||
#define M_FW_RIWR_SOPEOP 0x3
|
||||
#define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)
|
||||
|
||||
#define S_FW_RIWR_FLAGS 8
|
||||
#define M_FW_RIWR_FLAGS 0x3fffff
|
||||
#define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)
|
||||
#define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
|
||||
|
||||
#define S_FW_RIWR_TID 8
|
||||
#define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)
|
||||
|
||||
#define S_FW_RIWR_LEN 0
|
||||
#define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)
|
||||
|
||||
#define S_FW_RIWR_GEN 31
|
||||
#define V_FW_RIWR_GEN(x) ((x) << S_FW_RIWR_GEN)
|
||||
|
||||
struct t3_sge {
|
||||
__be32 stag;
|
||||
__be32 len;
|
||||
__be64 to;
|
||||
};
|
||||
|
||||
/* If num_sgle is zero, flit 5+ contains immediate data.*/
|
||||
struct t3_send_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
|
||||
u8 rdmaop; /* 2 */
|
||||
u8 reserved[3];
|
||||
__be32 rem_stag;
|
||||
__be32 plen; /* 3 */
|
||||
__be32 num_sgle;
|
||||
struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
|
||||
};
|
||||
|
||||
struct t3_local_inv_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
__be32 stag; /* 2 */
|
||||
__be32 reserved3;
|
||||
};
|
||||
|
||||
struct t3_rdma_write_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
u8 rdmaop; /* 2 */
|
||||
u8 reserved[3];
|
||||
__be32 stag_sink;
|
||||
__be64 to_sink; /* 3 */
|
||||
__be32 plen; /* 4 */
|
||||
__be32 num_sgle;
|
||||
struct t3_sge sgl[T3_MAX_SGE]; /* 5+ */
|
||||
};
|
||||
|
||||
struct t3_rdma_read_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
u8 rdmaop; /* 2 */
|
||||
u8 reserved[3];
|
||||
__be32 rem_stag;
|
||||
__be64 rem_to; /* 3 */
|
||||
__be32 local_stag; /* 4 */
|
||||
__be32 local_len;
|
||||
__be64 local_to; /* 5 */
|
||||
};
|
||||
|
||||
enum t3_addr_type {
|
||||
T3_VA_BASED_TO = 0x0,
|
||||
T3_ZERO_BASED_TO = 0x1
|
||||
} __attribute__ ((packed));
|
||||
|
||||
enum t3_mem_perms {
|
||||
T3_MEM_ACCESS_LOCAL_READ = 0x1,
|
||||
T3_MEM_ACCESS_LOCAL_WRITE = 0x2,
|
||||
T3_MEM_ACCESS_REM_READ = 0x4,
|
||||
T3_MEM_ACCESS_REM_WRITE = 0x8
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct t3_bind_mw_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
u16 reserved; /* 2 */
|
||||
u8 type;
|
||||
u8 perms;
|
||||
__be32 mr_stag;
|
||||
__be32 mw_stag; /* 3 */
|
||||
__be32 mw_len;
|
||||
__be64 mw_va; /* 4 */
|
||||
__be32 mr_pbl_addr; /* 5 */
|
||||
u8 reserved2[3];
|
||||
u8 mr_pagesz;
|
||||
};
|
||||
|
||||
struct t3_receive_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
u8 pagesz[T3_MAX_SGE];
|
||||
__be32 num_sgle; /* 2 */
|
||||
struct t3_sge sgl[T3_MAX_SGE]; /* 3+ */
|
||||
__be32 pbl_addr[T3_MAX_SGE];
|
||||
};
|
||||
|
||||
struct t3_bypass_wr {
|
||||
struct fw_riwrh wrh;
|
||||
union t3_wrid wrid; /* 1 */
|
||||
};
|
||||
|
||||
struct t3_modify_qp_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
__be32 flags; /* 2 */
|
||||
__be32 quiesce; /* 2 */
|
||||
__be32 max_ird; /* 3 */
|
||||
__be32 max_ord; /* 3 */
|
||||
__be64 sge_cmd; /* 4 */
|
||||
__be64 ctx1; /* 5 */
|
||||
__be64 ctx0; /* 6 */
|
||||
};
|
||||
|
||||
enum t3_modify_qp_flags {
|
||||
MODQP_QUIESCE = 0x01,
|
||||
MODQP_MAX_IRD = 0x02,
|
||||
MODQP_MAX_ORD = 0x04,
|
||||
MODQP_WRITE_EC = 0x08,
|
||||
MODQP_READ_EC = 0x10,
|
||||
};
|
||||
|
||||
|
||||
enum t3_mpa_attrs {
|
||||
uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
|
||||
uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
|
||||
uP_RI_MPA_CRC_ENABLE = 0x4,
|
||||
uP_RI_MPA_IETF_ENABLE = 0x8
|
||||
} __attribute__ ((packed));
|
||||
|
||||
enum t3_qp_caps {
|
||||
uP_RI_QP_RDMA_READ_ENABLE = 0x01,
|
||||
uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
|
||||
uP_RI_QP_BIND_ENABLE = 0x04,
|
||||
uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
|
||||
uP_RI_QP_STAG0_ENABLE = 0x10
|
||||
} __attribute__ ((packed));
|
||||
|
||||
enum rdma_init_rtr_types {
|
||||
RTR_READ = 1,
|
||||
RTR_WRITE = 2,
|
||||
RTR_SEND = 3,
|
||||
};
|
||||
|
||||
#define S_RTR_TYPE 2
|
||||
#define M_RTR_TYPE 0x3
|
||||
#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
|
||||
#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
|
||||
|
||||
#define S_CHAN 4
|
||||
#define M_CHAN 0x3
|
||||
#define V_CHAN(x) ((x) << S_CHAN)
|
||||
#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
|
||||
|
||||
struct t3_rdma_init_attr {
|
||||
u32 tid;
|
||||
u32 qpid;
|
||||
u32 pdid;
|
||||
u32 scqid;
|
||||
u32 rcqid;
|
||||
u32 rq_addr;
|
||||
u32 rq_size;
|
||||
enum t3_mpa_attrs mpaattrs;
|
||||
enum t3_qp_caps qpcaps;
|
||||
u16 tcp_emss;
|
||||
u32 ord;
|
||||
u32 ird;
|
||||
u64 qp_dma_addr;
|
||||
u32 qp_dma_size;
|
||||
enum rdma_init_rtr_types rtr_type;
|
||||
u16 flags;
|
||||
u16 rqe_count;
|
||||
u32 irs;
|
||||
u32 chan;
|
||||
};
|
||||
|
||||
struct t3_rdma_init_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
__be32 qpid; /* 2 */
|
||||
__be32 pdid;
|
||||
__be32 scqid; /* 3 */
|
||||
__be32 rcqid;
|
||||
__be32 rq_addr; /* 4 */
|
||||
__be32 rq_size;
|
||||
u8 mpaattrs; /* 5 */
|
||||
u8 qpcaps;
|
||||
__be16 ulpdu_size;
|
||||
__be16 flags_rtr_type;
|
||||
__be16 rqe_count;
|
||||
__be32 ord; /* 6 */
|
||||
__be32 ird;
|
||||
__be64 qp_dma_addr; /* 7 */
|
||||
__be32 qp_dma_size; /* 8 */
|
||||
__be32 irs;
|
||||
};
|
||||
|
||||
struct t3_genbit {
|
||||
u64 flit[15];
|
||||
__be64 genbit;
|
||||
};
|
||||
|
||||
enum rdma_init_wr_flags {
|
||||
MPA_INITIATOR = (1<<0),
|
||||
PRIV_QP = (1<<1),
|
||||
};
|
||||
|
||||
union t3_wr {
|
||||
struct t3_send_wr send;
|
||||
struct t3_rdma_write_wr write;
|
||||
struct t3_rdma_read_wr read;
|
||||
struct t3_receive_wr recv;
|
||||
struct t3_local_inv_wr local_inv;
|
||||
struct t3_bind_mw_wr bind;
|
||||
struct t3_bypass_wr bypass;
|
||||
struct t3_rdma_init_wr init;
|
||||
struct t3_modify_qp_wr qp_mod;
|
||||
struct t3_genbit genbit;
|
||||
u64 flit[16];
|
||||
};
|
||||
|
||||
#define T3_SQ_CQE_FLIT 13
|
||||
#define T3_SQ_COOKIE_FLIT 14
|
||||
|
||||
#define T3_RQ_COOKIE_FLIT 13
|
||||
#define T3_RQ_CQE_FLIT 14
|
||||
|
||||
static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
|
||||
{
|
||||
return G_FW_RIWR_OP(be32toh(wqe->op_seop_flags));
|
||||
}
|
||||
|
||||
static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
|
||||
enum t3_wr_flags flags, u8 genbit, u32 tid,
|
||||
u8 len)
|
||||
{
|
||||
wqe->op_seop_flags = htobe32(V_FW_RIWR_OP(op) |
|
||||
V_FW_RIWR_SOPEOP(M_FW_RIWR_SOPEOP) |
|
||||
V_FW_RIWR_FLAGS(flags));
|
||||
wmb();
|
||||
wqe->gen_tid_len = htobe32(V_FW_RIWR_GEN(genbit) |
|
||||
V_FW_RIWR_TID(tid) |
|
||||
V_FW_RIWR_LEN(len));
|
||||
/* 2nd gen bit... */
|
||||
((union t3_wr *)wqe)->genbit.genbit = htobe64(genbit);
|
||||
}
|
||||
|
||||
/*
|
||||
* T3 ULP2_TX commands
|
||||
*/
|
||||
enum t3_utx_mem_op {
|
||||
T3_UTX_MEM_READ = 2,
|
||||
T3_UTX_MEM_WRITE = 3
|
||||
};
|
||||
|
||||
/* T3 MC7 RDMA TPT entry format */
|
||||
|
||||
enum tpt_mem_type {
|
||||
TPT_NON_SHARED_MR = 0x0,
|
||||
TPT_SHARED_MR = 0x1,
|
||||
TPT_MW = 0x2,
|
||||
TPT_MW_RELAXED_PROTECTION = 0x3
|
||||
};
|
||||
|
||||
enum tpt_addr_type {
|
||||
TPT_ZBTO = 0,
|
||||
TPT_VATO = 1
|
||||
};
|
||||
|
||||
enum tpt_mem_perm {
|
||||
TPT_LOCAL_READ = 0x8,
|
||||
TPT_LOCAL_WRITE = 0x4,
|
||||
TPT_REMOTE_READ = 0x2,
|
||||
TPT_REMOTE_WRITE = 0x1
|
||||
};
|
||||
|
||||
struct tpt_entry {
|
||||
__be32 valid_stag_pdid;
|
||||
__be32 flags_pagesize_qpid;
|
||||
|
||||
__be32 rsvd_pbl_addr;
|
||||
__be32 len;
|
||||
__be32 va_hi;
|
||||
__be32 va_low_or_fbo;
|
||||
|
||||
__be32 rsvd_bind_cnt_or_pstag;
|
||||
__be32 rsvd_pbl_size;
|
||||
};
|
||||
|
||||
#define S_TPT_VALID 31
|
||||
#define V_TPT_VALID(x) ((x) << S_TPT_VALID)
|
||||
#define F_TPT_VALID V_TPT_VALID(1U)
|
||||
|
||||
#define S_TPT_STAG_KEY 23
|
||||
#define M_TPT_STAG_KEY 0xFF
|
||||
#define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
|
||||
#define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
|
||||
|
||||
#define S_TPT_STAG_STATE 22
|
||||
#define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
|
||||
#define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)
|
||||
|
||||
#define S_TPT_STAG_TYPE 20
|
||||
#define M_TPT_STAG_TYPE 0x3
|
||||
#define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
|
||||
#define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
|
||||
|
||||
#define S_TPT_PDID 0
|
||||
#define M_TPT_PDID 0xFFFFF
|
||||
#define V_TPT_PDID(x) ((x) << S_TPT_PDID)
|
||||
#define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)
|
||||
|
||||
#define S_TPT_PERM 28
|
||||
#define M_TPT_PERM 0xF
|
||||
#define V_TPT_PERM(x) ((x) << S_TPT_PERM)
|
||||
#define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)
|
||||
|
||||
#define S_TPT_REM_INV_DIS 27
|
||||
#define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
|
||||
#define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)
|
||||
|
||||
#define S_TPT_ADDR_TYPE 26
|
||||
#define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
|
||||
#define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)
|
||||
|
||||
#define S_TPT_MW_BIND_ENABLE 25
|
||||
#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
|
||||
#define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)
|
||||
|
||||
#define S_TPT_PAGE_SIZE 20
|
||||
#define M_TPT_PAGE_SIZE 0x1F
|
||||
#define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
|
||||
#define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
|
||||
|
||||
#define S_TPT_PBL_ADDR 0
|
||||
#define M_TPT_PBL_ADDR 0x1FFFFFFF
|
||||
#define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
|
||||
#define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
|
||||
|
||||
#define S_TPT_QPID 0
|
||||
#define M_TPT_QPID 0xFFFFF
|
||||
#define V_TPT_QPID(x) ((x) << S_TPT_QPID)
|
||||
#define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)
|
||||
|
||||
#define S_TPT_PSTAG 0
|
||||
#define M_TPT_PSTAG 0xFFFFFF
|
||||
#define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
|
||||
#define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
|
||||
|
||||
#define S_TPT_PBL_SIZE 0
|
||||
#define M_TPT_PBL_SIZE 0xFFFFF
|
||||
#define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
|
||||
#define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
|
||||
|
||||
/*
|
||||
* CQE defs
|
||||
*/
|
||||
struct t3_cqe {
|
||||
__be32 header;
|
||||
__be32 len;
|
||||
union {
|
||||
struct {
|
||||
__be32 stag;
|
||||
__be32 msn;
|
||||
} rcqe;
|
||||
struct {
|
||||
u32 wrid_hi;
|
||||
u32 wrid_low;
|
||||
} scqe;
|
||||
} u;
|
||||
};
|
||||
|
||||
#define S_CQE_OOO 31
|
||||
#define M_CQE_OOO 0x1
|
||||
#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
|
||||
#define V_CEQ_OOO(x) ((x)<<S_CQE_OOO)
|
||||
|
||||
#define S_CQE_QPID 12
|
||||
#define M_CQE_QPID 0x7FFFF
|
||||
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
|
||||
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
|
||||
|
||||
#define S_CQE_SWCQE 11
|
||||
#define M_CQE_SWCQE 0x1
|
||||
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
|
||||
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
|
||||
|
||||
#define S_CQE_GENBIT 10
|
||||
#define M_CQE_GENBIT 0x1
|
||||
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
|
||||
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
|
||||
|
||||
#define S_CQE_STATUS 5
|
||||
#define M_CQE_STATUS 0x1F
|
||||
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
|
||||
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
|
||||
|
||||
#define S_CQE_TYPE 4
|
||||
#define M_CQE_TYPE 0x1
|
||||
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
|
||||
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
|
||||
|
||||
#define S_CQE_OPCODE 0
|
||||
#define M_CQE_OPCODE 0xF
|
||||
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
|
||||
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
|
||||
|
||||
#define SW_CQE(x) (G_CQE_SWCQE(be32toh((x).header)))
|
||||
#define CQE_OOO(x) (G_CQE_OOO(be32toh((x).header)))
|
||||
#define CQE_QPID(x) (G_CQE_QPID(be32toh((x).header)))
|
||||
#define CQE_GENBIT(x) (G_CQE_GENBIT(be32toh((x).header)))
|
||||
#define CQE_TYPE(x) (G_CQE_TYPE(be32toh((x).header)))
|
||||
#define SQ_TYPE(x) (CQE_TYPE((x)))
|
||||
#define RQ_TYPE(x) (!CQE_TYPE((x)))
|
||||
#define CQE_STATUS(x) (G_CQE_STATUS(be32toh((x).header)))
|
||||
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32toh((x).header)))
|
||||
|
||||
#define CQE_SEND_OPCODE(x)( \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
|
||||
|
||||
#define CQE_LEN(x) (be32toh((x).len))
|
||||
|
||||
/* used for RQ completion processing */
|
||||
#define CQE_WRID_STAG(x) (be32toh((x).u.rcqe.stag))
|
||||
#define CQE_WRID_MSN(x) (be32toh((x).u.rcqe.msn))
|
||||
|
||||
/* used for SQ completion processing */
|
||||
#define CQE_WRID_SQ_WPTR(x) ((x).u.scqe.wrid_hi)
|
||||
#define CQE_WRID_WPTR(x) ((x).u.scqe.wrid_low)
|
||||
|
||||
/* generic accessor macros */
|
||||
#define CQE_WRID_HI(x) ((x).u.scqe.wrid_hi)
|
||||
#define CQE_WRID_LOW(x) ((x).u.scqe.wrid_low)
|
||||
|
||||
#define TPT_ERR_SUCCESS 0x0
|
||||
#define TPT_ERR_STAG 0x1 /* STAG invalid: either the */
|
||||
/* STAG is offlimt, being 0, */
|
||||
/* or STAG_key mismatch */
|
||||
#define TPT_ERR_PDID 0x2 /* PDID mismatch */
|
||||
#define TPT_ERR_QPID 0x3 /* QPID mismatch */
|
||||
#define TPT_ERR_ACCESS 0x4 /* Invalid access right */
|
||||
#define TPT_ERR_WRAP 0x5 /* Wrap error */
|
||||
#define TPT_ERR_BOUND 0x6 /* base and bounds voilation */
|
||||
#define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
|
||||
/* shared memory region */
|
||||
#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
|
||||
/* shared memory region */
|
||||
#define TPT_ERR_ECC 0x9 /* ECC error detected */
|
||||
#define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */
|
||||
/* reading PSTAG for a MW */
|
||||
/* Invalidate */
|
||||
#define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
|
||||
/* software error */
|
||||
#define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */
|
||||
#define TPT_ERR_CRC 0x10 /* CRC error */
|
||||
#define TPT_ERR_MARKER 0x11 /* Marker error */
|
||||
#define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
|
||||
#define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */
|
||||
#define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */
|
||||
#define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
|
||||
#define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */
|
||||
#define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
|
||||
#define TPT_ERR_MSN 0x18 /* MSN error */
|
||||
#define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */
|
||||
#define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */
|
||||
/* or READ_REQ */
|
||||
#define TPT_ERR_MSN_GAP 0x1B
|
||||
#define TPT_ERR_MSN_RANGE 0x1C
|
||||
#define TPT_ERR_IRD_OVERFLOW 0x1D
|
||||
#define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
|
||||
/* software error */
|
||||
#define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
|
||||
/* mismatch) */
|
||||
|
||||
struct t3_swsq {
|
||||
uint64_t wr_id;
|
||||
struct t3_cqe cqe;
|
||||
uint32_t sq_wptr;
|
||||
__be32 read_len;
|
||||
int opcode;
|
||||
int complete;
|
||||
int signaled;
|
||||
};
|
||||
|
||||
struct t3_swrq {
|
||||
__u64 wr_id;
|
||||
__u32 pbl_addr;
|
||||
};
|
||||
|
||||
/*
|
||||
* A T3 WQ implements both the SQ and RQ.
|
||||
*/
|
||||
struct t3_wq {
|
||||
union t3_wr *queue; /* DMA accessible memory */
|
||||
bus_addr_t dma_addr; /* DMA address for HW */
|
||||
u32 error; /* 1 once we go to ERROR */
|
||||
u32 qpid;
|
||||
u32 wptr; /* idx to next available WR slot */
|
||||
u32 size_log2; /* total wq size */
|
||||
struct t3_swsq *sq; /* SW SQ */
|
||||
struct t3_swsq *oldest_read; /* tracks oldest pending read */
|
||||
u32 sq_wptr; /* sq_wptr - sq_rptr == count of */
|
||||
u32 sq_rptr; /* pending wrs */
|
||||
u32 sq_size_log2; /* sq size */
|
||||
struct t3_swrq *rq; /* SW RQ (holds consumer wr_ids */
|
||||
u32 rq_wptr; /* rq_wptr - rq_rptr == count of */
|
||||
u32 rq_rptr; /* pending wrs */
|
||||
struct t3_swrq *rq_oldest_wr; /* oldest wr on the SW RQ */
|
||||
u32 rq_size_log2; /* rq size */
|
||||
u32 rq_addr; /* rq adapter address */
|
||||
void *doorbell; /* kernel db */
|
||||
u64 udb; /* user db if any */
|
||||
struct cxio_rdev *rdev;
|
||||
};
|
||||
|
||||
struct t3_cq {
|
||||
u32 cqid;
|
||||
u32 rptr;
|
||||
u32 wptr;
|
||||
u32 size_log2;
|
||||
bus_addr_t dma_addr;
|
||||
struct t3_cqe *queue;
|
||||
struct t3_cqe *sw_queue;
|
||||
u32 sw_rptr;
|
||||
u32 sw_wptr;
|
||||
};
|
||||
|
||||
#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
|
||||
CQE_GENBIT(*cqe))
|
||||
|
||||
struct t3_cq_status_page {
|
||||
u32 cq_err;
|
||||
};
|
||||
|
||||
static inline int cxio_cq_in_error(struct t3_cq *cq)
|
||||
{
|
||||
return ((struct t3_cq_status_page *)
|
||||
&cq->queue[1 << cq->size_log2])->cq_err;
|
||||
}
|
||||
|
||||
static inline void cxio_set_cq_in_error(struct t3_cq *cq)
|
||||
{
|
||||
((struct t3_cq_status_page *)
|
||||
&cq->queue[1 << cq->size_log2])->cq_err = 1;
|
||||
}
|
||||
|
||||
static inline void cxio_set_wq_in_error(struct t3_wq *wq)
|
||||
{
|
||||
wq->queue->flit[13] = 1;
|
||||
}
|
||||
|
||||
static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
|
||||
{
|
||||
struct t3_cqe *cqe;
|
||||
|
||||
cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
|
||||
if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
|
||||
return cqe;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
|
||||
{
|
||||
struct t3_cqe *cqe;
|
||||
|
||||
if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
|
||||
cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
|
||||
return cqe;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
|
||||
{
|
||||
struct t3_cqe *cqe;
|
||||
|
||||
if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
|
||||
cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
|
||||
return cqe;
|
||||
}
|
||||
cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
|
||||
if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
|
||||
return cqe;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif
|
File diff suppressed because it is too large
Load Diff
@ -1,460 +0,0 @@
|
||||
/*-
|
||||
* Copyright (c) 2012 Chelsio Communications, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#include "opt_inet.h"
|
||||
|
||||
#ifdef TCP_OFFLOAD
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
#include <sys/kernel.h>
|
||||
#include <sys/module.h>
|
||||
#include <sys/bus.h>
|
||||
#include <sys/socket.h>
|
||||
#include <net/if.h>
|
||||
#include <net/ethernet.h>
|
||||
#include <net/if_vlan_var.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/toecore.h>
|
||||
|
||||
#include "cxgb_include.h"
|
||||
#include "ulp/tom/cxgb_tom.h"
|
||||
#include "ulp/tom/cxgb_l2t.h"
|
||||
|
||||
#define VLAN_NONE 0xfff
|
||||
#define SA(x) ((struct sockaddr *)(x))
|
||||
#define SIN(x) ((struct sockaddr_in *)(x))
|
||||
#define SINADDR(x) (SIN(x)->sin_addr.s_addr)
|
||||
|
||||
/*
|
||||
* Module locking notes: There is a RW lock protecting the L2 table as a
|
||||
* whole plus a mutex per L2T entry. Entry lookups and allocations happen
|
||||
* under the protection of the table lock, individual entry changes happen
|
||||
* while holding that entry's mutex. The table lock nests outside the
|
||||
* entry locks. Allocations of new entries take the table lock as writers so
|
||||
* no other lookups can happen while allocating new entries. Entry updates
|
||||
* take the table lock as readers so multiple entries can be updated in
|
||||
* parallel. An L2T entry can be dropped by decrementing its reference count
|
||||
* and therefore can happen in parallel with entry allocation but no entry
|
||||
* can change state or increment its ref count during allocation as both of
|
||||
* these perform lookups.
|
||||
*
|
||||
* When acquiring multiple locks, the order is llentry -> L2 table -> L2 entry.
|
||||
*/
|
||||
|
||||
static inline unsigned int
|
||||
arp_hash(u32 key, int ifindex, const struct l2t_data *d)
|
||||
{
|
||||
return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up an L2T entry and send any packets waiting in the arp queue. Must be
|
||||
* called with the entry locked.
|
||||
*/
|
||||
static int
|
||||
setup_l2e_send_pending(struct adapter *sc, struct l2t_entry *e)
|
||||
{
|
||||
struct mbuf *m;
|
||||
struct cpl_l2t_write_req *req;
|
||||
struct port_info *pi = &sc->port[e->smt_idx]; /* smt_idx is port_id */
|
||||
|
||||
mtx_assert(&e->lock, MA_OWNED);
|
||||
|
||||
m = M_GETHDR_OFLD(pi->first_qset, CPL_PRIORITY_CONTROL, req);
|
||||
if (m == NULL) {
|
||||
log(LOG_ERR, "%s: no mbuf, can't setup L2 entry at index %d\n",
|
||||
__func__, e->idx);
|
||||
return (ENOMEM);
|
||||
}
|
||||
|
||||
req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
|
||||
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
|
||||
req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
|
||||
V_L2T_W_VLAN(e->vlan & EVL_VLID_MASK) |
|
||||
V_L2T_W_PRIO(EVL_PRIOFTAG(e->vlan)));
|
||||
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
|
||||
|
||||
t3_offload_tx(sc, m);
|
||||
|
||||
/*
|
||||
* XXX: We used pi->first_qset to send the L2T_WRITE_REQ. If any mbuf
|
||||
* on the arpq is going out via another queue set associated with the
|
||||
* port then it has a bad race with the L2T_WRITE_REQ. Ideally we
|
||||
* should wait till the reply to the write before draining the arpq.
|
||||
*/
|
||||
while (e->arpq_head) {
|
||||
m = e->arpq_head;
|
||||
e->arpq_head = m->m_next;
|
||||
m->m_next = NULL;
|
||||
t3_offload_tx(sc, m);
|
||||
}
|
||||
e->arpq_tail = NULL;
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a packet to the an L2T entry's queue of packets awaiting resolution.
|
||||
* Must be called with the entry's lock held.
|
||||
*/
|
||||
static inline void
|
||||
arpq_enqueue(struct l2t_entry *e, struct mbuf *m)
|
||||
{
|
||||
mtx_assert(&e->lock, MA_OWNED);
|
||||
|
||||
m->m_next = NULL;
|
||||
if (e->arpq_head)
|
||||
e->arpq_tail->m_next = m;
|
||||
else
|
||||
e->arpq_head = m;
|
||||
e->arpq_tail = m;
|
||||
}
|
||||
|
||||
static void
|
||||
resolution_failed_mbuf(struct mbuf *m)
|
||||
{
|
||||
log(LOG_ERR, "%s: leaked mbuf %p, CPL at %p",
|
||||
__func__, m, mtod(m, void *));
|
||||
}
|
||||
|
||||
static void
|
||||
resolution_failed(struct l2t_entry *e)
|
||||
{
|
||||
struct mbuf *m;
|
||||
|
||||
mtx_assert(&e->lock, MA_OWNED);
|
||||
|
||||
while (e->arpq_head) {
|
||||
m = e->arpq_head;
|
||||
e->arpq_head = m->m_next;
|
||||
m->m_next = NULL;
|
||||
resolution_failed_mbuf(m);
|
||||
}
|
||||
e->arpq_tail = NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
update_entry(struct adapter *sc, struct l2t_entry *e, uint8_t *lladdr,
|
||||
uint16_t vtag)
|
||||
{
|
||||
|
||||
mtx_assert(&e->lock, MA_OWNED);
|
||||
|
||||
/*
|
||||
* The entry may be in active use (e->refcount > 0) or not. We update
|
||||
* it even when it's not as this simplifies the case where we decide to
|
||||
* reuse the entry later.
|
||||
*/
|
||||
|
||||
if (lladdr == NULL &&
|
||||
(e->state == L2T_STATE_RESOLVING || e->state == L2T_STATE_FAILED)) {
|
||||
/*
|
||||
* Never got a valid L2 address for this one. Just mark it as
|
||||
* failed instead of removing it from the hash (for which we'd
|
||||
* need to wlock the table).
|
||||
*/
|
||||
e->state = L2T_STATE_FAILED;
|
||||
resolution_failed(e);
|
||||
return;
|
||||
|
||||
} else if (lladdr == NULL) {
|
||||
|
||||
/* Valid or already-stale entry was deleted (or expired) */
|
||||
|
||||
KASSERT(e->state == L2T_STATE_VALID ||
|
||||
e->state == L2T_STATE_STALE,
|
||||
("%s: lladdr NULL, state %d", __func__, e->state));
|
||||
|
||||
e->state = L2T_STATE_STALE;
|
||||
|
||||
} else {
|
||||
|
||||
if (e->state == L2T_STATE_RESOLVING ||
|
||||
e->state == L2T_STATE_FAILED ||
|
||||
memcmp(e->dmac, lladdr, ETHER_ADDR_LEN)) {
|
||||
|
||||
/* unresolved -> resolved; or dmac changed */
|
||||
|
||||
memcpy(e->dmac, lladdr, ETHER_ADDR_LEN);
|
||||
e->vlan = vtag;
|
||||
setup_l2e_send_pending(sc, e);
|
||||
}
|
||||
e->state = L2T_STATE_VALID;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Ask the toecore layer to resolve the entry's next-hop IPv4 address.
 *
 * Returns EWOULDBLOCK if resolution was started but has not completed
 * (t3_l2_update() will finish the job later).  For any other return value
 * the entry is updated with the result (address on rc == 0, failure
 * otherwise) before this function returns.
 */
static int
resolve_entry(struct adapter *sc, struct l2t_entry *e)
{
	struct tom_data *td = sc->tom_softc;
	struct toedev *tod = &td->tod;
	struct sockaddr_in sin = {0};
	/*
	 * NOTE(review): buffer is ETHER_HDR_LEN, not ETHER_ADDR_LEN — the
	 * resolver may write more than a bare MAC here; confirm against
	 * toe_l2_resolve() before shrinking this.
	 */
	uint8_t dmac[ETHER_HDR_LEN];
	uint16_t vtag = EVL_VLID_MASK;	/* default if resolver sets none */
	int rc;

	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(struct sockaddr_in);
	SINADDR(&sin) = e->addr;

	rc = toe_l2_resolve(tod, e->ifp, SA(&sin), dmac, &vtag);
	if (rc == EWOULDBLOCK)
		return (rc);

	mtx_lock(&e->lock);
	update_entry(sc, e, rc == 0 ? dmac : NULL, vtag);
	mtx_unlock(&e->lock);

	return (rc);
}
|
||||
|
||||
/*
 * Slow path for transmitting an offload packet: the L2 entry is stale,
 * unresolved, or failed.  Sends the packet if the entry can be (re)validated,
 * or queues it on the entry's ARP queue to be sent when resolution completes.
 *
 * Returns EHOSTUNREACH if resolution has failed (the mbuf is not freed here;
 * resolution_failed_mbuf() only logs it as leaked), 0 or the result of
 * t3_offload_tx() otherwise.
 */
int
t3_l2t_send_slow(struct adapter *sc, struct mbuf *m, struct l2t_entry *e)
{

again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */

		if (resolve_entry(sc, e) != EWOULDBLOCK)
			goto again;	/* entry updated, re-examine state */

		/* Fall through */

	case L2T_STATE_VALID:	/* fast-path, send the packet on */

		return (t3_offload_tx(sc, m));

	case L2T_STATE_RESOLVING:
		mtx_lock(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* State changed before we got the lock; retry. */
			mtx_unlock(&e->lock);
			goto again;
		}
		arpq_enqueue(e, m);
		mtx_unlock(&e->lock);

		if (resolve_entry(sc, e) == EWOULDBLOCK)
			break;

		/*
		 * Resolution finished synchronously: flush (or fail) the
		 * packets queued meanwhile, including ours.
		 */
		mtx_lock(&e->lock);
		if (e->state == L2T_STATE_VALID && e->arpq_head)
			setup_l2e_send_pending(sc, e);
		if (e->state == L2T_STATE_FAILED)
			resolution_failed(e);
		mtx_unlock(&e->lock);
		break;

	case L2T_STATE_FAILED:
		resolution_failed_mbuf(m);
		return (EHOSTUNREACH);
	}

	return (0);
}
|
||||
|
||||
/*
|
||||
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
|
||||
*/
|
||||
/*
 * Pick a free entry (refcnt == 0), scanning from d->rover for rough
 * round-robin reuse; entry 0 is never handed out.  If the chosen entry is
 * still linked into the hash table from a previous life it is unhashed
 * here.  Returns NULL only when no entry is free.
 */
static struct l2t_entry *
alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	rw_assert(&d->lock, RA_WLOCKED);

	if (!atomic_load_acq_int(&d->nfree))
		return (NULL);

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e) {
		if (atomic_load_acq_int(&e->refcnt) == 0)
			goto found;
	}

	/* Wrap around: the free entry must be before the rover. */
	for (e = &d->l2tab[1]; atomic_load_acq_int(&e->refcnt); ++e)
		continue;
found:
	d->rover = e + 1;
	atomic_add_int(&d->nfree, -1);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table. We need to remove it.
	 */
	if (e->state != L2T_STATE_UNUSED) {
		int hash = arp_hash(e->addr, e->ifp->if_index, d);

		/* Unlink e from its hash chain. */
		for (p = &d->l2tab[hash].first; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				break;
			}
		}
		e->state = L2T_STATE_UNUSED;
	}

	return (e);
}
|
||||
|
||||
/*
 * Look up (or create) the L2T entry for the given next hop, interface, and
 * port.  A reference is taken on the returned entry; drop it with
 * l2t_release().  Returns NULL if a new entry is needed but the table is
 * full.
 */
struct l2t_entry *
t3_l2t_get(struct port_info *pi, struct ifnet *ifp, struct sockaddr *sa)
{
	struct tom_data *td = pi->adapter->tom_softc;
	struct l2t_entry *e;
	struct l2t_data *d = td->l2t;
	uint32_t addr = SINADDR(sa);	/* next-hop IPv4 address */
	int hash = arp_hash(addr, ifp->if_index, d);
	unsigned int smt_idx = pi->port_id;

	rw_wlock(&d->lock);
	/* Reuse an existing entry for the same (addr, ifp, smt_idx). */
	for (e = d->l2tab[hash].first; e; e = e->next) {
		if (e->addr == addr && e->ifp == ifp && e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			goto done;
		}
	}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		mtx_lock(&e->lock);	/* avoid race with t3_l2t_free */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;

		e->state = L2T_STATE_RESOLVING;
		e->addr = addr;
		e->ifp = ifp;
		e->smt_idx = smt_idx;
		atomic_store_rel_int(&e->refcnt, 1);

		KASSERT(ifp->if_vlantrunk == NULL, ("TOE+VLAN unimplemented."));
		e->vlan = VLAN_NONE;

		mtx_unlock(&e->lock);
	}

done:
	rw_wunlock(&d->lock);

	return (e);
}
|
||||
|
||||
/*
 * toedev callback: the kernel's L2 information for (sa, ifp) changed
 * (address resolved, changed, or expired).  Update our matching L2T entry,
 * if any; destinations we have never offloaded to are ignored.
 */
void
t3_l2_update(struct toedev *tod, struct ifnet *ifp, struct sockaddr *sa,
    uint8_t *lladdr, uint16_t vtag)
{
	struct tom_data *td = t3_tomdata(tod);
	struct adapter *sc = tod->tod_softc;
	struct l2t_entry *e;
	struct l2t_data *d = td->l2t;
	u32 addr = *(u32 *) &SIN(sa)->sin_addr;
	int hash = arp_hash(addr, ifp->if_index, d);

	rw_rlock(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifp == ifp) {
			/* Entry lock taken before the table lock is dropped. */
			mtx_lock(&e->lock);
			goto found;
		}
	rw_runlock(&d->lock);

	/*
	 * This is of no interest to us. We've never had an offloaded
	 * connection to this destination, and we aren't attempting one right
	 * now.
	 */
	return;

found:
	rw_runlock(&d->lock);

	KASSERT(e->state != L2T_STATE_UNUSED,
	    ("%s: unused entry in the hash.", __func__));

	update_entry(sc, e, lladdr, vtag);
	mtx_unlock(&e->lock);
}
|
||||
|
||||
struct l2t_data *
|
||||
t3_init_l2t(unsigned int l2t_capacity)
|
||||
{
|
||||
struct l2t_data *d;
|
||||
int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
|
||||
|
||||
d = malloc(size, M_CXGB, M_NOWAIT | M_ZERO);
|
||||
if (!d)
|
||||
return (NULL);
|
||||
|
||||
d->nentries = l2t_capacity;
|
||||
d->rover = &d->l2tab[1]; /* entry 0 is not used */
|
||||
atomic_store_rel_int(&d->nfree, l2t_capacity - 1);
|
||||
rw_init(&d->lock, "L2T");
|
||||
|
||||
for (i = 0; i < l2t_capacity; ++i) {
|
||||
d->l2tab[i].idx = i;
|
||||
d->l2tab[i].state = L2T_STATE_UNUSED;
|
||||
mtx_init(&d->l2tab[i].lock, "L2T_E", NULL, MTX_DEF);
|
||||
atomic_store_rel_int(&d->l2tab[i].refcnt, 0);
|
||||
}
|
||||
return (d);
|
||||
}
|
||||
|
||||
void
|
||||
t3_free_l2t(struct l2t_data *d)
|
||||
{
|
||||
int i;
|
||||
|
||||
rw_destroy(&d->lock);
|
||||
for (i = 0; i < d->nentries; ++i)
|
||||
mtx_destroy(&d->l2tab[i].lock);
|
||||
|
||||
free(d, M_CXGB);
|
||||
}
|
||||
|
||||
static int
|
||||
do_l2t_write_rpl(struct sge_qset *qs, struct rsp_desc *r, struct mbuf *m)
|
||||
{
|
||||
struct cpl_l2t_write_rpl *rpl = mtod(m, void *);
|
||||
|
||||
if (rpl->status != CPL_ERR_NONE)
|
||||
log(LOG_ERR,
|
||||
"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
|
||||
rpl->status, GET_TID(rpl));
|
||||
|
||||
m_freem(m);
|
||||
return (0);
|
||||
}
|
||||
|
||||
/* Register the CPL handler for hardware L2T write replies. */
void
t3_init_l2t_cpl_handlers(struct adapter *sc)
{
	t3_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
}
|
||||
#endif
|
@ -1,114 +0,0 @@
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007-2009, Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
$FreeBSD$
|
||||
|
||||
***************************************************************************/
|
||||
#ifndef _CHELSIO_L2T_H
|
||||
#define _CHELSIO_L2T_H
|
||||
|
||||
#include <sys/lock.h>
|
||||
#include <sys/rwlock.h>
|
||||
|
||||
enum {
|
||||
L2T_SIZE = 2048
|
||||
};
|
||||
|
||||
enum {
|
||||
L2T_STATE_VALID, /* entry is up to date */
|
||||
L2T_STATE_STALE, /* entry may be used but needs revalidation */
|
||||
L2T_STATE_RESOLVING, /* entry needs address resolution */
|
||||
L2T_STATE_FAILED, /* failed to resolve */
|
||||
L2T_STATE_UNUSED /* entry not in use */
|
||||
};
|
||||
|
||||
/*
|
||||
* Each L2T entry plays multiple roles. First of all, it keeps state for the
|
||||
* corresponding entry of the HW L2 table and maintains a queue of offload
|
||||
* packets awaiting address resolution. Second, it is a node of a hash table
|
||||
* chain, where the nodes of the chain are linked together through their next
|
||||
* pointer. Finally, each node is a bucket of a hash table, pointing to the
|
||||
* first element in its chain through its first pointer.
|
||||
*/
|
||||
struct l2t_entry {
	uint16_t state;			/* entry state (L2T_STATE_*) */
	uint16_t idx;			/* entry index */
	uint32_t addr;			/* nexthop IP address */
	struct ifnet *ifp;		/* outgoing interface */
	uint16_t smt_idx;		/* SMT index */
	uint16_t vlan;			/* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
	struct l2t_entry *first;	/* start of hash chain */
	struct l2t_entry *next;		/* next l2t_entry on chain */
	struct mbuf *arpq_head;		/* queue of packets awaiting resolution */
	struct mbuf *arpq_tail;		/* tail of that queue */
	struct mtx lock;		/* protects state, dmac, vlan, arpq */
	volatile uint32_t refcnt;	/* entry reference count */
	uint8_t dmac[ETHER_ADDR_LEN];	/* nexthop's MAC address */
};
|
||||
|
||||
struct l2t_data {
|
||||
unsigned int nentries; /* number of entries */
|
||||
struct l2t_entry *rover; /* starting point for next allocation */
|
||||
volatile uint32_t nfree; /* number of free entries */
|
||||
struct rwlock lock;
|
||||
struct l2t_entry l2tab[0];
|
||||
};
|
||||
|
||||
void t3_l2e_free(struct l2t_data *, struct l2t_entry *e);
|
||||
void t3_l2_update(struct toedev *tod, struct ifnet *ifp, struct sockaddr *sa,
|
||||
uint8_t *lladdr, uint16_t vtag);
|
||||
struct l2t_entry *t3_l2t_get(struct port_info *, struct ifnet *,
|
||||
struct sockaddr *);
|
||||
int t3_l2t_send_slow(struct adapter *, struct mbuf *, struct l2t_entry *);
|
||||
struct l2t_data *t3_init_l2t(unsigned int);
|
||||
void t3_free_l2t(struct l2t_data *);
|
||||
void t3_init_l2t_cpl_handlers(struct adapter *);
|
||||
|
||||
static inline int
|
||||
l2t_send(struct adapter *sc, struct mbuf *m, struct l2t_entry *e)
|
||||
{
|
||||
if (__predict_true(e->state == L2T_STATE_VALID))
|
||||
return t3_offload_tx(sc, m);
|
||||
else
|
||||
return t3_l2t_send_slow(sc, m, e);
|
||||
}
|
||||
|
||||
static inline void
|
||||
l2t_release(struct l2t_data *d, struct l2t_entry *e)
|
||||
{
|
||||
if (atomic_fetchadd_int(&e->refcnt, -1) == 1) /* 1 -> 0 transition */
|
||||
atomic_add_int(&d->nfree, 1);
|
||||
}
|
||||
|
||||
static inline void
|
||||
l2t_hold(struct l2t_data *d, struct l2t_entry *e)
|
||||
{
|
||||
if (atomic_fetchadd_int(&e->refcnt, 1) == 0) /* 0 -> 1 transition */
|
||||
atomic_add_int(&d->nfree, -1);
|
||||
}
|
||||
|
||||
#endif
|
File diff suppressed because it is too large
Load Diff
@ -1,95 +0,0 @@
|
||||
/*-
|
||||
* Copyright (c) 2007-2009, Chelsio Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
#ifndef CXGB_TOEPCB_H_
|
||||
#define CXGB_TOEPCB_H_
|
||||
#include <sys/bus.h>
|
||||
#include <sys/condvar.h>
|
||||
#include <sys/limits.h>
|
||||
|
||||
#define TP_DATASENT (1 << 0)
|
||||
#define TP_TX_WAIT_IDLE (1 << 1)
|
||||
#define TP_FIN_SENT (1 << 2)
|
||||
#define TP_ABORT_RPL_PENDING (1 << 3)
|
||||
#define TP_ABORT_SHUTDOWN (1 << 4)
|
||||
#define TP_ABORT_RPL_RCVD (1 << 5)
|
||||
#define TP_ABORT_REQ_RCVD (1 << 6)
|
||||
#define TP_ATTACHED (1 << 7)
|
||||
#define TP_CPL_DONE (1 << 8)
|
||||
#define TP_IS_A_SYNQ_ENTRY (1 << 9)
|
||||
#define TP_ABORT_RPL_SENT (1 << 10)
|
||||
#define TP_SEND_FIN (1 << 11)
|
||||
#define TP_SYNQE_EXPANDED (1 << 12)
|
||||
|
||||
/*
 * Per-connection TOE state, one per offloaded TCP connection.
 * Lives on tom_data's toep_list (see TP_* flags above for tp_flags).
 */
struct toepcb {
	TAILQ_ENTRY(toepcb) link;	/* toep_list */
	int tp_flags;			/* TP_* flags */
	struct toedev *tp_tod;		/* owning TOE device */
	struct l2t_entry *tp_l2t;	/* L2 entry for this connection */
	int tp_tid;			/* connection TID */
	int tp_wr_max;			/* work-request budget (see toepcb_alloc) */
	int tp_wr_avail;
	int tp_wr_unacked;
	int tp_delack_mode;
	int tp_ulp_mode;
	int tp_qset;			/* queue set used by this connection */
	int tp_enqueued;
	int tp_rx_credits;

	struct inpcb *tp_inp;		/* backing inpcb */
	struct mbuf *tp_m_last;

	struct mbufq wr_list;		/* pending work requests */
	struct mbufq out_of_order_queue;
};
|
||||
|
||||
/* (Re)initialize the pending work-request queue to empty. */
static inline void
reset_wr_list(struct toepcb *toep)
{
	mbufq_init(&toep->wr_list, INT_MAX);	/* XXX: sane limit needed */
}
|
||||
|
||||
static inline void
|
||||
enqueue_wr(struct toepcb *toep, struct mbuf *m)
|
||||
{
|
||||
(void )mbufq_enqueue(&toep->wr_list, m);
|
||||
}
|
||||
|
||||
/* Return (without removing) the oldest pending work request, or NULL. */
static inline struct mbuf *
peek_wr(const struct toepcb *toep)
{
	return (mbufq_first(&toep->wr_list));
}
|
||||
|
||||
/* Remove and return the oldest pending work request, or NULL if empty. */
static inline struct mbuf *
dequeue_wr(struct toepcb *toep)
{
	return (mbufq_dequeue(&toep->wr_list));
}
|
||||
|
||||
#endif
|
@ -1,396 +0,0 @@
|
||||
/*-
|
||||
* Copyright (c) 2012 Chelsio Communications, Inc.
|
||||
* All rights reserved.
|
||||
* Written by: Navdeep Parhar <np@FreeBSD.org>
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#include "opt_inet.h"
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/kernel.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/malloc.h>
|
||||
#include <sys/module.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/taskqueue.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/tcp.h>
|
||||
#include <netinet/toecore.h>
|
||||
|
||||
#ifdef TCP_OFFLOAD
|
||||
#include "cxgb_include.h"
|
||||
#include "ulp/tom/cxgb_tom.h"
|
||||
#include "ulp/tom/cxgb_l2t.h"
|
||||
#include "ulp/tom/cxgb_toepcb.h"
|
||||
|
||||
MALLOC_DEFINE(M_CXGB, "cxgb", "Chelsio T3 Offload services");
|
||||
|
||||
/* Module ops */
|
||||
static int t3_tom_mod_load(void);
|
||||
static int t3_tom_mod_unload(void);
|
||||
static int t3_tom_modevent(module_t, int, void *);
|
||||
|
||||
/* ULD ops and helpers */
|
||||
static int t3_tom_activate(struct adapter *);
|
||||
static int t3_tom_deactivate(struct adapter *);
|
||||
|
||||
static int alloc_tid_tabs(struct tid_info *, u_int, u_int, u_int, u_int, u_int);
|
||||
static void free_tid_tabs(struct tid_info *);
|
||||
static int write_smt_entry(struct adapter *, int);
|
||||
static void free_tom_data(struct tom_data *);
|
||||
|
||||
static struct uld_info tom_uld_info = {
|
||||
.uld_id = ULD_TOM,
|
||||
.activate = t3_tom_activate,
|
||||
.deactivate = t3_tom_deactivate,
|
||||
};
|
||||
|
||||
struct toepcb *
|
||||
toepcb_alloc(struct toedev *tod)
|
||||
{
|
||||
struct toepcb *toep;
|
||||
|
||||
toep = malloc(sizeof(struct toepcb), M_CXGB, M_NOWAIT | M_ZERO);
|
||||
if (toep == NULL)
|
||||
return (NULL);
|
||||
|
||||
toep->tp_tod = tod;
|
||||
toep->tp_wr_max = toep->tp_wr_avail = 15;
|
||||
toep->tp_wr_unacked = 0;
|
||||
toep->tp_delack_mode = 0;
|
||||
|
||||
return (toep);
|
||||
}
|
||||
|
||||
/* Release a toepcb obtained from toepcb_alloc(). */
void
toepcb_free(struct toepcb *toep)
{
	free(toep, M_CXGB);
}
|
||||
|
||||
/*
 * Allocate the TID, server TID (stid), and active-open TID (atid) tables as
 * a single contiguous allocation and carve it into the three tables.  Also
 * builds the stid/atid free lists and initializes their locks.
 *
 * Returns 0 or ENOMEM.  Undo with free_tid_tabs().
 */
static int
alloc_tid_tabs(struct tid_info *t, u_int ntids, u_int natids, u_int nstids,
    u_int atid_base, u_int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = malloc(size, M_CXGB, M_NOWAIT | M_ZERO);
	if (!t->tid_tab)
		return (ENOMEM);

	/* stid table follows the tid table; atid table follows the stids */
	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	t->tids_in_use = 0;
	mtx_init(&t->stid_lock, "stid", NULL, MTX_DEF);
	mtx_init(&t->atid_lock, "atid", NULL, MTX_DEF);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		/* link 0..nstids-2; last entry's next stays NULL (zeroed) */
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return (0);
}
|
||||
|
||||
static void
|
||||
free_tid_tabs(struct tid_info *t)
|
||||
{
|
||||
if (mtx_initialized(&t->stid_lock))
|
||||
mtx_destroy(&t->stid_lock);
|
||||
if (mtx_initialized(&t->atid_lock))
|
||||
mtx_destroy(&t->atid_lock);
|
||||
free(t->tid_tab, M_CXGB);
|
||||
}
|
||||
|
||||
/*
 * Program the SMT (source MAC table) entry for port idx with the port's
 * hardware MAC address, via a CPL_SMT_WRITE_REQ control message.
 *
 * Returns 0 if the request was handed to t3_offload_tx(), ENOMEM if no
 * mbuf was available for the request.
 */
static int
write_smt_entry(struct adapter *sc, int idx)
{
	struct port_info *pi = &sc->port[idx];
	struct cpl_smt_write_req *req;
	struct mbuf *m;

	m = M_GETHDR_OFLD(0, CPL_PRIORITY_CONTROL, req);
	if (m == NULL) {
		log(LOG_ERR, "%s: no mbuf, can't write SMT entry for %d\n",
		    __func__, idx);
		return (ENOMEM);
	}

	req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);

	t3_offload_tx(sc, m);

	return (0);
}
|
||||
|
||||
/*
 * Release everything allocated by t3_tom_activate().  Safe to call on a
 * partially constructed tom_data: each member is checked before teardown.
 * The toep list must already be empty.
 */
static void
free_tom_data(struct tom_data *td)
{
	KASSERT(TAILQ_EMPTY(&td->toep_list),
	    ("%s: toep_list not empty", __func__));

	/* listen_mask is still 0 if the listen hash was never created */
	if (td->listen_mask != 0)
		hashdestroy(td->listen_hash, M_CXGB, td->listen_mask);

	if (mtx_initialized(&td->toep_list_lock))
		mtx_destroy(&td->toep_list_lock);
	if (mtx_initialized(&td->lctx_hash_lock))
		mtx_destroy(&td->lctx_hash_lock);
	if (mtx_initialized(&td->tid_release_lock))
		mtx_destroy(&td->tid_release_lock);
	if (td->l2t)
		t3_free_l2t(td->l2t);
	free_tid_tabs(&td->tid_maps);
	free(td, M_CXGB);
}
|
||||
|
||||
/*
|
||||
* Ground control to Major TOM
|
||||
* Commencing countdown, engines on
|
||||
*/
|
||||
static int
|
||||
t3_tom_activate(struct adapter *sc)
|
||||
{
|
||||
struct tom_data *td;
|
||||
struct toedev *tod;
|
||||
int i, rc = 0;
|
||||
struct mc5_params *mc5 = &sc->params.mc5;
|
||||
u_int ntids, natids, mtus;
|
||||
|
||||
ADAPTER_LOCK_ASSERT_OWNED(sc); /* for sc->flags */
|
||||
|
||||
/* per-adapter softc for TOM */
|
||||
td = malloc(sizeof(*td), M_CXGB, M_ZERO | M_NOWAIT);
|
||||
if (td == NULL)
|
||||
return (ENOMEM);
|
||||
|
||||
/* List of TOE PCBs and associated lock */
|
||||
mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
|
||||
TAILQ_INIT(&td->toep_list);
|
||||
|
||||
/* Listen context */
|
||||
mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
|
||||
td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGB,
|
||||
&td->listen_mask, HASH_NOWAIT);
|
||||
|
||||
/* TID release task */
|
||||
TASK_INIT(&td->tid_release_task, 0 , t3_process_tid_release_list, td);
|
||||
mtx_init(&td->tid_release_lock, "tid release", NULL, MTX_DEF);
|
||||
|
||||
/* L2 table */
|
||||
td->l2t = t3_init_l2t(L2T_SIZE);
|
||||
if (td->l2t == NULL) {
|
||||
rc = ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* TID tables */
|
||||
ntids = t3_mc5_size(&sc->mc5) - mc5->nroutes - mc5->nfilters -
|
||||
mc5->nservers;
|
||||
natids = min(ntids / 2, 64 * 1024);
|
||||
rc = alloc_tid_tabs(&td->tid_maps, ntids, natids, mc5->nservers,
|
||||
0x100000 /* ATID_BASE */, ntids);
|
||||
if (rc != 0)
|
||||
goto done;
|
||||
|
||||
/* CPL handlers */
|
||||
t3_init_listen_cpl_handlers(sc);
|
||||
t3_init_l2t_cpl_handlers(sc);
|
||||
t3_init_cpl_io(sc);
|
||||
|
||||
/* toedev ops */
|
||||
tod = &td->tod;
|
||||
init_toedev(tod);
|
||||
tod->tod_softc = sc;
|
||||
tod->tod_connect = t3_connect;
|
||||
tod->tod_listen_start = t3_listen_start;
|
||||
tod->tod_listen_stop = t3_listen_stop;
|
||||
tod->tod_rcvd = t3_rcvd;
|
||||
tod->tod_output = t3_tod_output;
|
||||
tod->tod_send_rst = t3_send_rst;
|
||||
tod->tod_send_fin = t3_send_fin;
|
||||
tod->tod_pcb_detach = t3_pcb_detach;
|
||||
tod->tod_l2_update = t3_l2_update;
|
||||
tod->tod_syncache_added = t3_syncache_added;
|
||||
tod->tod_syncache_removed = t3_syncache_removed;
|
||||
tod->tod_syncache_respond = t3_syncache_respond;
|
||||
tod->tod_offload_socket = t3_offload_socket;
|
||||
|
||||
/* port MTUs */
|
||||
mtus = sc->port[0].ifp->if_mtu;
|
||||
if (sc->params.nports > 1)
|
||||
mtus |= sc->port[1].ifp->if_mtu << 16;
|
||||
t3_write_reg(sc, A_TP_MTU_PORT_TABLE, mtus);
|
||||
t3_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd,
|
||||
sc->params.rev == 0 ? sc->port[0].ifp->if_mtu : 0xffff);
|
||||
|
||||
/* SMT entry for each port */
|
||||
for_each_port(sc, i) {
|
||||
write_smt_entry(sc, i);
|
||||
TOEDEV(sc->port[i].ifp) = &td->tod;
|
||||
}
|
||||
|
||||
/* Switch TP to offload mode */
|
||||
t3_tp_set_offload_mode(sc, 1);
|
||||
|
||||
sc->tom_softc = td;
|
||||
sc->flags |= TOM_INIT_DONE;
|
||||
register_toedev(tod);
|
||||
|
||||
done:
|
||||
if (rc != 0)
|
||||
free_tom_data(td);
|
||||
|
||||
return (rc);
|
||||
}
|
||||
|
||||
/*
 * ULD deactivate hook: tear down TOM state for the adapter.  Fails with
 * EBUSY while any port still has IFCAP_TOE enabled, any offloaded
 * connection exists, or any listening context is still registered.
 */
static int
t3_tom_deactivate(struct adapter *sc)
{
	int rc = 0;
	struct tom_data *td = sc->tom_softc;

	ADAPTER_LOCK_ASSERT_OWNED(sc);	/* for sc->flags */

	if (td == NULL)
		return (0);	/* XXX. KASSERT? */

	if (sc->offload_map != 0)
		return (EBUSY);	/* at least one port has IFCAP_TOE enabled */

	mtx_lock(&td->toep_list_lock);
	if (!TAILQ_EMPTY(&td->toep_list))
		rc = EBUSY;
	mtx_unlock(&td->toep_list_lock);

	mtx_lock(&td->lctx_hash_lock);
	if (td->lctx_count > 0)
		rc = EBUSY;
	mtx_unlock(&td->lctx_hash_lock);

	if (rc == 0) {
		/* Nothing in flight: unhook from the stack and free it all. */
		unregister_toedev(&td->tod);
		t3_tp_set_offload_mode(sc, 0);
		free_tom_data(td);
		sc->tom_softc = NULL;
		sc->flags &= ~TOM_INIT_DONE;
	}

	return (rc);
}
|
||||
|
||||
static int
|
||||
t3_tom_mod_load(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = t3_register_uld(&tom_uld_info);
|
||||
if (rc != 0)
|
||||
t3_tom_mod_unload();
|
||||
|
||||
return (rc);
|
||||
}
|
||||
|
||||
/*
 * t3_iterate() callback: attempt to deactivate TOM on one adapter.
 */
static void
tom_uninit(struct adapter *sc, void *arg __unused)
{
	/* Try to free resources (works only if no port has IFCAP_TOE) */
	ADAPTER_LOCK(sc);
	if (sc->flags & TOM_INIT_DONE)
		t3_deactivate_uld(sc, ULD_TOM);
	ADAPTER_UNLOCK(sc);
}
|
||||
|
||||
static int
|
||||
t3_tom_mod_unload(void)
|
||||
{
|
||||
t3_iterate(tom_uninit, NULL);
|
||||
|
||||
if (t3_unregister_uld(&tom_uld_info) == EBUSY)
|
||||
return (EBUSY);
|
||||
|
||||
return (0);
|
||||
}
|
||||
#endif /* ifdef TCP_OFFLOAD */
|
||||
|
||||
/*
 * Module event dispatcher.  TOM is only functional in kernels built with
 * TCP_OFFLOAD; otherwise every event fails with EOPNOTSUPP.
 */
static int
t3_tom_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = t3_tom_mod_load();
		break;

	case MOD_UNLOAD:
		rc = t3_tom_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	rc = EOPNOTSUPP;
#endif
	return (rc);
}
|
||||
|
||||
static moduledata_t t3_tom_moddata= {
|
||||
"t3_tom",
|
||||
t3_tom_modevent,
|
||||
0
|
||||
};
|
||||
|
||||
MODULE_VERSION(t3_tom, 1);
|
||||
MODULE_DEPEND(t3_tom, toecore, 1, 1, 1);
|
||||
MODULE_DEPEND(t3_tom, cxgbc, 1, 1, 1);
|
||||
DECLARE_MODULE(t3_tom, t3_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);
|
@ -1,280 +0,0 @@
|
||||
/**************************************************************************
|
||||
|
||||
Copyright (c) 2007, 2009 Chelsio Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Neither the name of the Chelsio Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
$FreeBSD$
|
||||
|
||||
***************************************************************************/
|
||||
#ifndef CXGB_TOM_H_
|
||||
#define CXGB_TOM_H_
|
||||
#include <sys/protosw.h>
|
||||
#include <netinet/toecore.h>
|
||||
|
||||
MALLOC_DECLARE(M_CXGB);
|
||||
|
||||
#define KTR_CXGB KTR_SPARE3
|
||||
|
||||
#define LISTEN_HASH_SIZE 32
|
||||
|
||||
/*
|
||||
* Holds the size, base address, free list start, etc of the TID, server TID,
|
||||
* and active-open TID tables for a offload device.
|
||||
* The tables themselves are allocated dynamically.
|
||||
*/
|
||||
struct tid_info {
	void **tid_tab;			/* TID -> connection context */
	unsigned int ntids;		/* size of tid_tab */
	volatile unsigned int tids_in_use;

	union listen_entry *stid_tab;	/* server TID table (in tid_tab alloc) */
	unsigned int nstids;
	unsigned int stid_base;		/* first hw stid */

	union active_open_entry *atid_tab; /* active-open TID table */
	unsigned int natids;
	unsigned int atid_base;		/* first hw atid */

	/*
	 * The following members are accessed R/W so we put them in their own
	 * cache lines. TOM_XXX: actually do what is said here.
	 *
	 * XXX We could combine the atid fields above with the lock here since
	 * atids are use once (unlike other tids). OTOH the above fields are
	 * usually in cache due to tid_tab.
	 */
	struct mtx atid_lock;		/* protects afree/atids_in_use */
	union active_open_entry *afree;	/* atid free list head */
	unsigned int atids_in_use;

	struct mtx stid_lock;		/* protects sfree/stids_in_use */
	union listen_entry *sfree;	/* stid free list head */
	unsigned int stids_in_use;
};
|
||||
|
||||
/* Per-adapter TOM softc. */
struct tom_data {
	struct toedev tod;		/* must allow t3_tomdata() recovery */

	/*
	 * toepcb's associated with this TOE device are either on the
	 * toep list or in the synq of a listening socket in lctx hash.
	 */
	struct mtx toep_list_lock;
	TAILQ_HEAD(, toepcb) toep_list;

	struct l2t_data *l2t;		/* L2 table */
	struct tid_info tid_maps;	/* TID/stid/atid tables */

	/*
	 * The next two locks listen_lock, and tid_release_lock are used rarely
	 * so we let them potentially share a cacheline.
	 */

	LIST_HEAD(, listen_ctx) *listen_hash;	/* see hashinit_flags() */
	u_long listen_mask;
	int lctx_count;		/* # of lctx in the hash table */
	struct mtx lctx_hash_lock;

	void **tid_release_list;	/* TIDs pending release */
	struct mtx tid_release_lock;
	struct task tid_release_task;	/* see t3_process_tid_release_list */
};
|
||||
|
||||
/* An embryonic (syncache) offloaded connection on a listener's synq. */
struct synq_entry {
	TAILQ_ENTRY(synq_entry) link;	/* listen_ctx's synq link */
	int flags;			/* same as toepcb's tp_flags */
	int tid;			/* hardware connection TID */
	struct mbuf *m;			/* backpointer to containing mbuf */
	struct listen_ctx *lctx;	/* backpointer to listen ctx */
	struct cpl_pass_establish *cpl;
	struct toepcb *toep;
	struct l2t_entry *e;		/* L2 entry for the peer */
	uint32_t iss;			/* initial send sequence */
	uint32_t ts;
	uint32_t opt0h;
	uint32_t qset;
	int rx_credits;
	volatile u_int refcnt;

#define RPL_OK		0	/* ok to reply */
#define RPL_DONE	1	/* replied already */
#define RPL_DONT	2	/* don't reply */
	volatile u_int reply;	/* see above. */
};
|
||||
|
||||
#define LCTX_RPL_PENDING 1	/* waiting for CPL_PASS_OPEN_RPL */

/*
 * Per listening socket TOE state.  Lives in tom_data's listen_hash and is
 * reference counted.
 */
struct listen_ctx {
	LIST_ENTRY(listen_ctx) link;	/* listen hash linkage */
	volatile int refcnt;
	int stid;		/* server TID allocated for this listener */
	int flags;		/* LCTX_* flags above */
	struct inpcb *inp;	/* listening socket's inp */
	int qset;		/* queue set used for this listener */
	TAILQ_HEAD(, synq_entry) synq;	/* embryonic connections */
};
|
||||
|
||||
void t3_process_tid_release_list(void *data, int pending);

/*
 * Recover the tom_data that a toedev is embedded in.
 */
static inline struct tom_data *
t3_tomdata(struct toedev *tod)
{

	return (__containerof(tod, struct tom_data, tod));
}
|
||||
|
||||
/*
 * A slot in the server (listen) TID table: holds the context pointer while
 * the STID is in use, or the free-list link (see tid_info's sfree) while it
 * is free.
 */
union listen_entry {
	void *ctx;
	union listen_entry *next;
};
|
||||
|
||||
/*
 * A slot in the active-open TID table: holds the context pointer while the
 * ATID is in use, or the free-list link (see tid_info's afree) while it is
 * free.
 */
union active_open_entry {
	void *ctx;
	union active_open_entry *next;
};
|
||||
|
||||
/*
|
||||
* Map an ATID or STID to their entries in the corresponding TID tables.
|
||||
*/
|
||||
static inline union active_open_entry *atid2entry(const struct tid_info *t,
|
||||
unsigned int atid)
|
||||
{
|
||||
return &t->atid_tab[atid - t->atid_base];
|
||||
}
|
||||
|
||||
|
||||
static inline union listen_entry *stid2entry(const struct tid_info *t,
|
||||
unsigned int stid)
|
||||
{
|
||||
return &t->stid_tab[stid - t->stid_base];
|
||||
}
|
||||
|
||||
/*
 * Find the connection corresponding to a TID.
 *
 * Returns the context pointer stored in the TID's slot, or NULL when the
 * TID is out of range or its slot holds a free-list link instead of a
 * context.  The range check appears to assume the tid/stid/atid tables
 * occupy one contiguous allocation (tid_tab first, atid_tab last), so a
 * stored pointer that points back into that region is a free-list link,
 * not a context.  NOTE(review): this relies on flat address comparison of
 * pointers into distinct objects -- confirm the tables come from a single
 * allocation at the alloc site.
 */
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
{
	void *p;

	if (tid >= t->ntids)
		return (NULL);

	p = t->tid_tab[tid];
	if (p < (void *)t->tid_tab || p >= (void *)&t->atid_tab[t->natids])
		return (p);

	return (NULL);
}
|
||||
|
||||
/*
 * Find the connection corresponding to a server TID.
 *
 * Returns the listen context stored for the STID, or NULL when the STID is
 * out of range or its slot holds a free-list link (a pointer back into the
 * contiguous TID table region) instead of a context.  Same flat
 * address-comparison assumption as lookup_tid() above.
 */
static inline void *lookup_stid(const struct tid_info *t, unsigned int tid)
{
	void *p;

	if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
		return (NULL);

	p = stid2entry(t, tid)->ctx;
	if (p < (void *)t->tid_tab || p >= (void *)&t->atid_tab[t->natids])
		return (p);

	return (NULL);
}
|
||||
|
||||
/*
 * Find the connection corresponding to an active-open TID.
 *
 * Returns the context stored for the ATID, or NULL when the ATID is out of
 * range or its slot holds a free-list link (a pointer back into the
 * contiguous TID table region) instead of a context.  Same flat
 * address-comparison assumption as lookup_tid() above.
 */
static inline void *lookup_atid(const struct tid_info *t, unsigned int tid)
{
	void *p;

	if (tid < t->atid_base || tid >= t->atid_base + t->natids)
		return (NULL);

	p = atid2entry(t, tid)->ctx;
	if (p < (void *)t->tid_tab || p >= (void *)&t->atid_tab[t->natids])
		return (p);

	return (NULL);
}
|
||||
|
||||
static inline uint32_t
|
||||
calc_opt2(int cpu_idx)
|
||||
{
|
||||
uint32_t opt2 = F_CPU_INDEX_VALID | V_CPU_INDEX(cpu_idx);
|
||||
|
||||
/* 3 = highspeed CC algorithm */
|
||||
opt2 |= V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(3) |
|
||||
V_PACING_FLAVOR(1);
|
||||
|
||||
/* coalesce and push bit semantics */
|
||||
opt2 |= F_RX_COALESCE_VALID | V_RX_COALESCE(3);
|
||||
|
||||
return (htobe32(opt2));
|
||||
}
|
||||
|
||||
/*
 * Function prototypes, grouped by the file that implements them.
 */

/* cxgb_tom.c */
struct toepcb *toepcb_alloc(struct toedev *);
void toepcb_free(struct toepcb *);

/* cxgb_cpl_io.c */
void t3_init_cpl_io(struct adapter *);
int t3_push_frames(struct socket *, int);
int t3_connect(struct toedev *, struct socket *, struct rtentry *,
    struct sockaddr *);
int t3_tod_output(struct toedev *, struct tcpcb *);
int t3_send_rst(struct toedev *, struct tcpcb *);
int t3_send_fin(struct toedev *, struct tcpcb *);
void insert_tid(struct tom_data *, void *, unsigned int);
void update_tid(struct tom_data *, void *, unsigned int);
void remove_tid(struct tom_data *, unsigned int);
uint32_t calc_opt0h(struct socket *, int, int, struct l2t_entry *);
uint32_t calc_opt0l(struct socket *, int);
void queue_tid_release(struct toedev *, unsigned int);
void offload_socket(struct socket *, struct toepcb *);
void undo_offload_socket(struct socket *);
int select_rcv_wscale(void);
unsigned long select_rcv_wnd(struct socket *);
int find_best_mtu_idx(struct adapter *, struct in_conninfo *, int);
void make_established(struct socket *, uint32_t, uint32_t, uint16_t);
void t3_rcvd(struct toedev *, struct tcpcb *);
void t3_pcb_detach(struct toedev *, struct tcpcb *);
void send_abort_rpl(struct toedev *, int, int);
void release_tid(struct toedev *, unsigned int, int);

/* cxgb_listen.c */
void t3_init_listen_cpl_handlers(struct adapter *);
int t3_listen_start(struct toedev *, struct tcpcb *);
int t3_listen_stop(struct toedev *, struct tcpcb *);
void t3_syncache_added(struct toedev *, void *);
void t3_syncache_removed(struct toedev *, void *);
int t3_syncache_respond(struct toedev *, void *, struct mbuf *);
int do_abort_req_synqe(struct sge_qset *, struct rsp_desc *, struct mbuf *);
int do_abort_rpl_synqe(struct sge_qset *, struct rsp_desc *, struct mbuf *);
void t3_offload_socket(struct toedev *, void *, struct socket *);

#endif	/* closes the header guard opened above (not visible here) */
|
@ -5,14 +5,5 @@ SYSDIR?=${SRCTOP}/sys
|
||||
|
||||
SUBDIR= cxgb
SUBDIR+= cxgb_t3fw
SUBDIR+= ${_tom}
SUBDIR+= ${_iw_cxgb}

# The TOE (tom) and iWARP (iw_cxgb) modules build only on x86; iw_cxgb
# additionally requires OFED to be enabled (or an ALL_MODULES build).
.if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386"
_tom= tom
.if ${MK_OFED} != "no" || defined(ALL_MODULES)
_iw_cxgb= iw_cxgb
.endif
.endif

.include <bsd.subdir.mk>
|
||||
|
@ -1,15 +0,0 @@
|
||||
# $FreeBSD$

# Kernel module build for the Chelsio T3 iWARP (RDMA) driver.

CXGB = ${SRCTOP}/sys/dev/cxgb
.PATH: ${CXGB}/ulp/iw_cxgb

KMOD= iw_cxgb
SRCS= iw_cxgb.c iw_cxgb_cm.c iw_cxgb_hal.c
SRCS+= iw_cxgb_provider.c iw_cxgb_qp.c iw_cxgb_resource.c
SRCS+= iw_cxgb_ev.c iw_cxgb_mem.c iw_cxgb_dbg.c iw_cxgb_cq.c
# Generated interface/option headers required by the sources above.
SRCS+= bus_if.h device_if.h opt_sched.h pci_if.h pcib_if.h opt_ktr.h
SRCS+= opt_inet.h opt_ofed.h vnode_if.h
# OFED/linuxkpi include paths for the Linux-derived RDMA code.
CFLAGS+= -I${CXGB} -I${SRCTOP}/sys/ofed/include -DLINUX_TYPES_DEFINED
CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include

.include <bsd.kmod.mk>
|
@ -1,15 +0,0 @@
|
||||
# $FreeBSD$

# Kernel module build for the Chelsio T3 TCP offload (TOE) driver.

CXGB = ${SRCTOP}/sys/dev/cxgb
.PATH: ${CXGB}/ulp/tom

KMOD= t3_tom
SRCS= cxgb_tom.c cxgb_cpl_io.c cxgb_listen.c cxgb_l2t.c
# Generated option/interface headers required by the sources above.
SRCS+= opt_compat.h opt_inet.h opt_inet6.h opt_ipsec.h
SRCS+= opt_tcpdebug.h opt_ddb.h opt_sched.h opt_ktr.h
SRCS+= device_if.h bus_if.h pci_if.h
# NOTE(review): -g forces debug info unconditionally; consider dropping it.
CFLAGS+= -g -I${CXGB}

#CFLAGS+= -DDEBUG_PRINT -DDEBUG

.include <bsd.kmod.mk>
|
Loading…
x
Reference in New Issue
Block a user