From 671bf2b8b22f6c737fb6927b1cb7308ba50900ad Mon Sep 17 00:00:00 2001
From: Navdeep Parhar
Date: Tue, 5 Jul 2016 01:29:24 +0000
Subject: [PATCH] cxgbe(4): Changes to the CPL-handler registration mechanism
 and code related to "shared" CPLs.

a) Combine t4_set_tcb_field and t4_set_tcb_field_rpl into a single function.
Allow callers to direct the response to any iq.  Tidy up set_ulp_mode_iscsi
while there to use names from t4_tcb.h instead of magic constants.

b) Remove all CPL handler tables from struct adapter.  This reduces its size
by around 2KB.  All handlers are now registered at MOD_LOAD instead of attach
or some kind of initialization/activation.  The registration functions do not
need an adapter parameter any more.

c) Add per-iq handlers to deal with CPLs whose destination cannot be
determined solely from the opcode.  There are 2 such CPLs in use right now:
SET_TCB_RPL and L2T_WRITE_RPL.  The base driver continues to send filter and
L2T_WRITEs over the mgmtq and solicits the reply on fwq.  t4_tom (including
the DDP code) now uses the port's ctrlq to send L2T_WRITEs and SET_TCB_FIELDs
and solicits the reply on an ofld_rxq.  fwq and ofld_rxq have different
handlers that know what kind of tid to expect in the reply.

Update t4_write_l2e and callers to support any wrq/iq combination.

Approved by:	re@ (kib@)
Sponsored by:	Chelsio Communications
---
 sys/dev/cxgbe/adapter.h           |  29 +++--
 sys/dev/cxgbe/cxgbei/cxgbei.c     |  28 ++---
 sys/dev/cxgbe/cxgbei/icl_cxgbei.c |  13 ++-
 sys/dev/cxgbe/iw_cxgbe/cm.c       |  30 ++---
 sys/dev/cxgbe/iw_cxgbe/device.c   |   1 -
 sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h |   3 -
 sys/dev/cxgbe/t4_l2t.c            |  22 ++--
 sys/dev/cxgbe/t4_l2t.h            |   4 +-
 sys/dev/cxgbe/t4_main.c           | 179 ++++++++----------------------
 sys/dev/cxgbe/t4_sge.c            | 132 +++++++++++++++++++---
 sys/dev/cxgbe/tom/t4_connect.c    |   6 +-
 sys/dev/cxgbe/tom/t4_cpl_io.c     |  71 +++++-------
 sys/dev/cxgbe/tom/t4_ddp.c        |  26 +++--
 sys/dev/cxgbe/tom/t4_listen.c     |  10 +-
 sys/dev/cxgbe/tom/t4_tom.c        |  25 +++--
 sys/dev/cxgbe/tom/t4_tom.h        |  15 ++-
 sys/dev/cxgbe/tom/t4_tom_l2t.c    |  45 ++------
 sys/dev/cxgbe/tom/t4_tom_l2t.h    |   4 +-
 18 files changed, 303 insertions(+), 340 deletions(-)

diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index 34cb38fd3633..d31124581efa 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -372,6 +372,13 @@ enum {
 	NM_BUSY = 2,
 };
 
+struct sge_iq;
+struct rss_header;
+typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
+    struct mbuf *);
+typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
+typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);
+
 /*
  * Ingress Queue: T4 is producer, driver is consumer.
*/ @@ -379,6 +386,8 @@ struct sge_iq { uint32_t flags; volatile int state; struct adapter *adapter; + cpl_handler_t set_tcb_rpl; + cpl_handler_t l2t_write_rpl; struct iq_desc *desc; /* KVA of descriptor ring */ int8_t intr_pktc_idx; /* packet count threshold index */ uint8_t gen; /* generation bit */ @@ -739,12 +748,6 @@ struct sge { struct hw_buf_info hw_buf_info[SGE_FLBUF_SIZES]; }; -struct rss_header; -typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *, - struct mbuf *); -typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *); -typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *); - struct adapter { SLIST_ENTRY(adapter) link; device_t dev; @@ -783,6 +786,7 @@ struct adapter { struct sge sge; int lro_timeout; + int sc_do_rxcopy; struct taskqueue *tq[MAX_NCHAN]; /* General purpose taskqueues */ struct port_info *port[MAX_NPORTS]; @@ -842,15 +846,9 @@ struct adapter { struct memwin memwin[NUM_MEMWIN]; /* memory windows */ - an_handler_t an_handler __aligned(CACHE_LINE_SIZE); - fw_msg_handler_t fw_msg_handler[7]; /* NUM_FW6_TYPES */ - cpl_handler_t cpl_handler[0xef]; /* NUM_CPL_CMDS */ - const char *last_op; const void *last_op_thr; int last_op_flags; - - int sc_do_rxcopy; }; #define ADAPTER_LOCK(sc) mtx_lock(&(sc)->sc_lock) @@ -1080,9 +1078,6 @@ int t4_os_pci_restore_state(struct adapter *); void t4_os_portmod_changed(const struct adapter *, int); void t4_os_link_changed(struct adapter *, int, int, int); void t4_iterate(void (*)(struct adapter *, void *), void *); -int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t); -int t4_register_an_handler(struct adapter *, an_handler_t); -int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t); int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *); int begin_synchronized_op(struct adapter *, struct vi_info *, int, char *); void doom_vi(struct adapter *, struct vi_info *); @@ -1107,7 +1102,6 @@ void t4_nm_intr(void *); void t4_sge_modload(void); void t4_sge_modunload(void); uint64_t t4_sge_extfree_refs(void); -void t4_init_sge_cpl_handlers(struct adapter *); void t4_tweak_chip_settings(struct adapter *); int t4_read_chip_settings(struct adapter *); int t4_create_dma_tag(struct adapter *); @@ -1129,6 +1123,9 @@ int parse_pkt(struct mbuf **); void *start_wrq_wr(struct sge_wrq *, int, struct wrq_cookie *); void commit_wrq_wr(struct sge_wrq *, void *, struct wrq_cookie *); int tnl_cong(struct port_info *, int); +int t4_register_an_handler(an_handler_t); +int t4_register_fw_msg_handler(int, fw_msg_handler_t); +int t4_register_cpl_handler(int, cpl_handler_t); /* t4_tracer.c */ struct t4_tracer; diff --git a/sys/dev/cxgbe/cxgbei/cxgbei.c b/sys/dev/cxgbe/cxgbei/cxgbei.c index 775e3a2c9474..face22d02fc1 100644 --- a/sys/dev/cxgbe/cxgbei/cxgbei.c +++ b/sys/dev/cxgbe/cxgbei/cxgbei.c @@ -745,24 +745,6 @@ do_rx_iscsi_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) return (0); } -static void -t4_register_cpl_handler_with_tom(struct adapter *sc) -{ - - t4_register_cpl_handler(sc, CPL_ISCSI_HDR, do_rx_iscsi_hdr); - t4_register_cpl_handler(sc, CPL_ISCSI_DATA, do_rx_iscsi_data); - t4_register_cpl_handler(sc, CPL_RX_ISCSI_DDP, do_rx_iscsi_ddp); -} - -static void -t4_unregister_cpl_handler_with_tom(struct adapter *sc) -{ - - t4_register_cpl_handler(sc, CPL_ISCSI_HDR, NULL); - t4_register_cpl_handler(sc, CPL_ISCSI_DATA, NULL); - t4_register_cpl_handler(sc, CPL_RX_ISCSI_DDP, NULL); -} - /* initiator */ void cxgbei_conn_task_reserve_itt(void *conn, void 
**prv, @@ -835,7 +817,6 @@ cxgbei_activate(struct adapter *sc) return (rc); } - t4_register_cpl_handler_with_tom(sc); sc->iscsi_ulp_softc = ci; return (0); @@ -849,7 +830,6 @@ cxgbei_deactivate(struct adapter *sc) if (sc->iscsi_ulp_softc != NULL) { cxgbei_ddp_cleanup(sc->iscsi_ulp_softc); - t4_unregister_cpl_handler_with_tom(sc); free(sc->iscsi_ulp_softc, M_CXGBE); sc->iscsi_ulp_softc = NULL; } @@ -1062,6 +1042,10 @@ cxgbei_mod_load(void) { int rc; + t4_register_cpl_handler(CPL_ISCSI_HDR, do_rx_iscsi_hdr); + t4_register_cpl_handler(CPL_ISCSI_DATA, do_rx_iscsi_data); + t4_register_cpl_handler(CPL_RX_ISCSI_DDP, do_rx_iscsi_ddp); + rc = start_worker_threads(); if (rc != 0) return (rc); @@ -1088,6 +1072,10 @@ cxgbei_mod_unload(void) stop_worker_threads(); + t4_register_cpl_handler(CPL_ISCSI_HDR, NULL); + t4_register_cpl_handler(CPL_ISCSI_DATA, NULL); + t4_register_cpl_handler(CPL_RX_ISCSI_DDP, NULL); + return (0); } #endif diff --git a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c index d0e7f399195a..8f29452196fe 100644 --- a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c +++ b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c @@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$"); #include #include "common/common.h" +#include "common/t4_tcb.h" #include "tom/t4_tom.h" #include "cxgbei.h" @@ -584,19 +585,19 @@ send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen) static void set_ulp_mode_iscsi(struct adapter *sc, struct toepcb *toep, int hcrc, int dcrc) { - uint64_t val = 0; + uint64_t val = ULP_MODE_ISCSI; if (hcrc) - val |= ULP_CRC_HEADER; + val |= ULP_CRC_HEADER << 4; if (dcrc) - val |= ULP_CRC_DATA; - val <<= 4; - val |= ULP_MODE_ISCSI; + val |= ULP_CRC_DATA << 4; CTR4(KTR_CXGBE, "%s: tid %u, ULP_MODE_ISCSI, CRC hdr=%d data=%d", __func__, toep->tid, hcrc, dcrc); - t4_set_tcb_field(sc, toep, 1, 0, 0xfff, val); + t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_ULP_TYPE, + V_TCB_ULP_TYPE(M_TCB_ULP_TYPE) | V_TCB_ULP_RAW(M_TCB_ULP_RAW), val, + 0, 0, toep->ofld_rxq->iq.abs_id); } /* diff --git a/sys/dev/cxgbe/iw_cxgbe/cm.c b/sys/dev/cxgbe/iw_cxgbe/cm.c index 1f6cc3741fff..bea5aa01b850 100644 --- a/sys/dev/cxgbe/iw_cxgbe/cm.c +++ b/sys/dev/cxgbe/iw_cxgbe/cm.c @@ -2463,28 +2463,14 @@ static int terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbu return 0; } - void -c4iw_cm_init_cpl(struct adapter *sc) -{ - - t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, terminate); - t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, fw6_wr_rpl); - t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, fw6_cqe_handler); - t4_register_an_handler(sc, c4iw_ev_handler); -} - - void -c4iw_cm_term_cpl(struct adapter *sc) -{ - - t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, NULL); - t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, NULL); - t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, NULL); -} - int __init c4iw_cm_init(void) { + t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate); + t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl); + t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler); + t4_register_an_handler(c4iw_ev_handler); + TAILQ_INIT(&req_list); spin_lock_init(&req_lock); INIT_LIST_HEAD(&timeout_list); @@ -2496,7 +2482,6 @@ int __init c4iw_cm_init(void) if (!c4iw_taskq) return -ENOMEM; - return 0; } @@ -2506,5 +2491,10 @@ void __exit c4iw_cm_term(void) WARN_ON(!list_empty(&timeout_list)); flush_workqueue(c4iw_taskq); destroy_workqueue(c4iw_taskq); + + t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL); + t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL); + 
t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL); + t4_register_an_handler(NULL); } #endif diff --git a/sys/dev/cxgbe/iw_cxgbe/device.c b/sys/dev/cxgbe/iw_cxgbe/device.c index ea04190628ae..310b99bdc89f 100644 --- a/sys/dev/cxgbe/iw_cxgbe/device.c +++ b/sys/dev/cxgbe/iw_cxgbe/device.c @@ -227,7 +227,6 @@ c4iw_activate(struct adapter *sc) } sc->iwarp_softc = iwsc; - c4iw_cm_init_cpl(sc); rc = -c4iw_register_device(iwsc); if (rc) { diff --git a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h index 0aa2568ee907..faeb5f4bace8 100644 --- a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h +++ b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h @@ -936,9 +936,6 @@ extern int c4iw_max_read_depth; #define L1_CACHE_BYTES 32 #endif -void c4iw_cm_init_cpl(struct adapter *); -void c4iw_cm_term_cpl(struct adapter *); - void your_reg_device(struct c4iw_dev *dev); #define SGE_CTRLQ_NUM 0 diff --git a/sys/dev/cxgbe/t4_l2t.c b/sys/dev/cxgbe/t4_l2t.c index 7c4cedefb280..b4b1ee4d312d 100644 --- a/sys/dev/cxgbe/t4_l2t.c +++ b/sys/dev/cxgbe/t4_l2t.c @@ -111,27 +111,34 @@ t4_alloc_l2e(struct l2t_data *d) * The write may be synchronous or asynchronous. */ int -t4_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync) +t4_write_l2e(struct l2t_entry *e, int sync) { + struct sge_wrq *wrq; + struct adapter *sc; struct wrq_cookie cookie; struct cpl_l2t_write_req *req; - int idx = e->idx + sc->vres.l2t.start; + int idx; mtx_assert(&e->lock, MA_OWNED); + MPASS(e->wrq != NULL); - req = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*req), 16), &cookie); + wrq = e->wrq; + sc = wrq->adapter; + + req = start_wrq_wr(wrq, howmany(sizeof(*req), 16), &cookie); if (req == NULL) return (ENOMEM); + idx = e->idx + sc->vres.l2t.start; INIT_TP_WR(req, 0); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx | - V_SYNC_WR(sync) | V_TID_QID(sc->sge.fwq.abs_id))); + V_SYNC_WR(sync) | V_TID_QID(e->iqid))); req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync)); req->l2t_idx = htons(idx); req->vlan = htons(e->vlan); memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); - commit_wrq_wr(&sc->sge.mgmtq, req, &cookie); + commit_wrq_wr(wrq, req, &cookie); if (sync && e->state != L2T_STATE_SWITCHING) e->state = L2T_STATE_SYNC_WRITE; @@ -173,9 +180,11 @@ t4_l2t_set_switching(struct adapter *sc, struct l2t_entry *e, uint16_t vlan, e->vlan = vlan; e->lport = port; + e->wrq = &sc->sge.mgmtq; + e->iqid = sc->sge.fwq.abs_id; memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN); mtx_lock(&e->lock); - rc = t4_write_l2e(sc, e, 0); + rc = t4_write_l2e(e, 0); mtx_unlock(&e->lock); return (rc); } @@ -211,7 +220,6 @@ t4_init_l2t(struct adapter *sc, int flags) } sc->l2t = d; - t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl); return (0); } diff --git a/sys/dev/cxgbe/t4_l2t.h b/sys/dev/cxgbe/t4_l2t.h index c60eef1367b2..2d861dc9a2fd 100644 --- a/sys/dev/cxgbe/t4_l2t.h +++ b/sys/dev/cxgbe/t4_l2t.h @@ -61,6 +61,8 @@ struct l2t_entry { uint16_t state; /* entry state */ uint16_t idx; /* entry index */ uint32_t addr[4]; /* next hop IP or IPv6 address */ + uint32_t iqid; /* iqid for reply to write_l2e */ + struct sge_wrq *wrq; /* queue to use for write_l2e */ struct ifnet *ifp; /* outgoing interface */ uint16_t smt_idx; /* SMT index */ uint16_t vlan; /* VLAN TCI (id: 0-11, prio: 13-15) */ @@ -90,7 +92,7 @@ struct l2t_entry *t4_alloc_l2e(struct l2t_data *); struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *); int t4_l2t_set_switching(struct adapter *, struct l2t_entry *, uint16_t, uint8_t, uint8_t *); -int t4_write_l2e(struct adapter *, 
struct l2t_entry *, int); +int t4_write_l2e(struct l2t_entry *, int); int do_l2t_write_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *); static inline void diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c index eeb118078969..247175d660c3 100644 --- a/sys/dev/cxgbe/t4_main.c +++ b/sys/dev/cxgbe/t4_main.c @@ -458,10 +458,6 @@ static void vi_refresh_stats(struct adapter *, struct vi_info *); static void cxgbe_refresh_stats(struct adapter *, struct port_info *); static void cxgbe_tick(void *); static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t); -static int cpl_not_handled(struct sge_iq *, const struct rss_header *, - struct mbuf *); -static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *); -static int fw_msg_not_handled(struct adapter *, const __be64 *); static void t4_sysctls(struct adapter *); static void cxgbe_sysctls(struct port_info *); static int sysctl_int_array(SYSCTL_HANDLER_ARGS); @@ -525,6 +521,8 @@ static int del_filter(struct adapter *, struct t4_filter *); static void clear_filter(struct filter_entry *); static int set_filter_wr(struct adapter *, int); static int del_filter_wr(struct adapter *, int); +static int set_tcb_rpl(struct sge_iq *, const struct rss_header *, + struct mbuf *); static int get_sge_context(struct adapter *, struct t4_sge_context *); static int load_fw(struct adapter *, struct t4_data *); static int read_card_mem(struct adapter *, int, struct t4_mem_range *); @@ -589,11 +587,6 @@ struct { CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq)); CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl)); #endif - -/* No easy way to include t4_msg.h before adapter.h so we check this way */ -CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS); -CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES); - CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE); static int @@ -739,15 +732,6 @@ t4_attach(device_t dev) sc->mbox = sc->pf; memset(sc->chan_map, 0xff, sizeof(sc->chan_map)); - sc->an_handler = an_not_handled; - for (i = 0; i < nitems(sc->cpl_handler); i++) - sc->cpl_handler[i] = cpl_not_handled; - for (i = 0; i < nitems(sc->fw_msg_handler); i++) - sc->fw_msg_handler[i] = fw_msg_not_handled; - t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl); - t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt); - t4_register_cpl_handler(sc, CPL_T5_TRACE_PKT, t5_trace_pkt); - t4_init_sge_cpl_handlers(sc); /* Prepare the adapter for operation. */ buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK); @@ -4500,98 +4484,6 @@ cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) VLAN_SETCOOKIE(vlan, ifp); } -static int -cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) -{ - -#ifdef INVARIANTS - panic("%s: opcode 0x%02x on iq %p with payload %p", - __func__, rss->opcode, iq, m); -#else - log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n", - __func__, rss->opcode, iq, m); - m_freem(m); -#endif - return (EDOOFUS); -} - -int -t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) -{ - uintptr_t *loc, new; - - if (opcode >= nitems(sc->cpl_handler)) - return (EINVAL); - - new = h ? 
(uintptr_t)h : (uintptr_t)cpl_not_handled; - loc = (uintptr_t *) &sc->cpl_handler[opcode]; - atomic_store_rel_ptr(loc, new); - - return (0); -} - -static int -an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl) -{ - -#ifdef INVARIANTS - panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl); -#else - log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n", - __func__, iq, ctrl); -#endif - return (EDOOFUS); -} - -int -t4_register_an_handler(struct adapter *sc, an_handler_t h) -{ - uintptr_t *loc, new; - - new = h ? (uintptr_t)h : (uintptr_t)an_not_handled; - loc = (uintptr_t *) &sc->an_handler; - atomic_store_rel_ptr(loc, new); - - return (0); -} - -static int -fw_msg_not_handled(struct adapter *sc, const __be64 *rpl) -{ - const struct cpl_fw6_msg *cpl = - __containerof(rpl, struct cpl_fw6_msg, data[0]); - -#ifdef INVARIANTS - panic("%s: fw_msg type %d", __func__, cpl->type); -#else - log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type); -#endif - return (EDOOFUS); -} - -int -t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h) -{ - uintptr_t *loc, new; - - if (type >= nitems(sc->fw_msg_handler)) - return (EINVAL); - - /* - * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL - * handler dispatch table. Reject any attempt to install a handler for - * this subtype. - */ - if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL) - return (EINVAL); - - new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled; - loc = (uintptr_t *) &sc->fw_msg_handler[type]; - atomic_store_rel_ptr(loc, new); - - return (0); -} - /* * Should match fw_caps_config_ enums in t4fw_interface.h */ @@ -8262,38 +8154,53 @@ t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, rss->opcode)); + MPASS(iq == &sc->sge.fwq); + MPASS(is_ftid(sc, idx)); - if (is_ftid(sc, idx)) { + idx -= sc->tids.ftid_base; + f = &sc->tids.ftid_tab[idx]; + rc = G_COOKIE(rpl->cookie); - idx -= sc->tids.ftid_base; - f = &sc->tids.ftid_tab[idx]; - rc = G_COOKIE(rpl->cookie); - - mtx_lock(&sc->tids.ftid_lock); - if (rc == FW_FILTER_WR_FLT_ADDED) { - KASSERT(f->pending, ("%s: filter[%u] isn't pending.", - __func__, idx)); - f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; - f->pending = 0; /* asynchronous setup completed */ - f->valid = 1; - } else { - if (rc != FW_FILTER_WR_FLT_DELETED) { - /* Add or delete failed, display an error */ - log(LOG_ERR, - "filter %u setup failed with error %u\n", - idx, rc); - } - - clear_filter(f); - sc->tids.ftids_in_use--; + mtx_lock(&sc->tids.ftid_lock); + if (rc == FW_FILTER_WR_FLT_ADDED) { + KASSERT(f->pending, ("%s: filter[%u] isn't pending.", + __func__, idx)); + f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; + f->pending = 0; /* asynchronous setup completed */ + f->valid = 1; + } else { + if (rc != FW_FILTER_WR_FLT_DELETED) { + /* Add or delete failed, display an error */ + log(LOG_ERR, + "filter %u setup failed with error %u\n", + idx, rc); } - wakeup(&sc->tids.ftid_tab); - mtx_unlock(&sc->tids.ftid_lock); + + clear_filter(f); + sc->tids.ftids_in_use--; } + wakeup(&sc->tids.ftid_tab); + mtx_unlock(&sc->tids.ftid_lock); return (0); } +static int +set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) +{ + + MPASS(iq->set_tcb_rpl != NULL); + return (iq->set_tcb_rpl(iq, rss, m)); +} + +static int +l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) +{ + + MPASS(iq->l2t_write_rpl != NULL); + return 
(iq->l2t_write_rpl(iq, rss, m)); +} + static int get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) { @@ -9469,6 +9376,10 @@ mod_event(module_t mod, int cmd, void *arg) sx_xlock(&mlu); if (loaded++ == 0) { t4_sge_modload(); + t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl); + t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl); + t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); + t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); sx_init(&t4_list_lock, "T4/T5 adapters"); SLIST_INIT(&t4_list); #ifdef TCP_OFFLOAD diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c index 4cf3a5352098..6bf8d2297582 100644 --- a/sys/dev/cxgbe/t4_sge.c +++ b/sys/dev/cxgbe/t4_sge.c @@ -68,6 +68,7 @@ __FBSDID("$FreeBSD$"); #include "common/t4_regs.h" #include "common/t4_regs_values.h" #include "common/t4_msg.h" +#include "t4_l2t.h" #include "t4_mp_ring.h" #ifdef T4_PKT_TIMESTAMP @@ -253,12 +254,110 @@ static int sysctl_tc(SYSCTL_HANDLER_ARGS); static counter_u64_t extfree_refs; static counter_u64_t extfree_rels; +an_handler_t t4_an_handler; +fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES]; +cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS]; + + +static int +an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl) +{ + +#ifdef INVARIANTS + panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl); +#else + log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n", + __func__, iq, ctrl); +#endif + return (EDOOFUS); +} + +int +t4_register_an_handler(an_handler_t h) +{ + uintptr_t *loc, new; + + new = h ? (uintptr_t)h : (uintptr_t)an_not_handled; + loc = (uintptr_t *) &t4_an_handler; + atomic_store_rel_ptr(loc, new); + + return (0); +} + +static int +fw_msg_not_handled(struct adapter *sc, const __be64 *rpl) +{ + const struct cpl_fw6_msg *cpl = + __containerof(rpl, struct cpl_fw6_msg, data[0]); + +#ifdef INVARIANTS + panic("%s: fw_msg type %d", __func__, cpl->type); +#else + log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type); +#endif + return (EDOOFUS); +} + +int +t4_register_fw_msg_handler(int type, fw_msg_handler_t h) +{ + uintptr_t *loc, new; + + if (type >= nitems(t4_fw_msg_handler)) + return (EINVAL); + + /* + * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL + * handler dispatch table. Reject any attempt to install a handler for + * this subtype. + */ + if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL) + return (EINVAL); + + new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled; + loc = (uintptr_t *) &t4_fw_msg_handler[type]; + atomic_store_rel_ptr(loc, new); + + return (0); +} + +static int +cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) +{ + +#ifdef INVARIANTS + panic("%s: opcode 0x%02x on iq %p with payload %p", + __func__, rss->opcode, iq, m); +#else + log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n", + __func__, rss->opcode, iq, m); + m_freem(m); +#endif + return (EDOOFUS); +} + +int +t4_register_cpl_handler(int opcode, cpl_handler_t h) +{ + uintptr_t *loc, new; + + if (opcode >= nitems(t4_cpl_handler)) + return (EINVAL); + + new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled; + loc = (uintptr_t *) &t4_cpl_handler[opcode]; + atomic_store_rel_ptr(loc, new); + + return (0); +} + /* * Called on MOD_LOAD. Validates and calculates the SGE tunables. 
*/ void t4_sge_modload(void) { + int i; if (fl_pktshift < 0 || fl_pktshift > 7) { printf("Invalid hw.cxgbe.fl_pktshift value (%d)," @@ -291,6 +390,18 @@ t4_sge_modload(void) extfree_rels = counter_u64_alloc(M_WAITOK); counter_u64_zero(extfree_refs); counter_u64_zero(extfree_rels); + + t4_an_handler = an_not_handled; + for (i = 0; i < nitems(t4_fw_msg_handler); i++) + t4_fw_msg_handler[i] = fw_msg_not_handled; + for (i = 0; i < nitems(t4_cpl_handler); i++) + t4_cpl_handler[i] = cpl_not_handled; + + t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg); + t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg); + t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update); + t4_register_cpl_handler(CPL_RX_PKT, t4_eth_rx); + t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl); } void @@ -312,17 +423,6 @@ t4_sge_extfree_refs(void) return (refs - rels); } -void -t4_init_sge_cpl_handlers(struct adapter *sc) -{ - - t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_msg); - t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_msg); - t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE, handle_sge_egr_update); - t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx); - t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL, t4_handle_fw_rpl); -} - static inline void setup_pad_and_pack_boundaries(struct adapter *sc) { @@ -1316,7 +1416,7 @@ service_iq(struct sge_iq *iq, int budget) KASSERT(d->rss.opcode < NUM_CPL_CMDS, ("%s: bad opcode %02x.", __func__, d->rss.opcode)); - sc->cpl_handler[d->rss.opcode](iq, &d->rss, m0); + t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0); break; case X_RSPD_TYPE_INTR: @@ -1338,7 +1438,7 @@ service_iq(struct sge_iq *iq, int budget) * iWARP async notification. */ if (lq >= 1024) { - sc->an_handler(iq, &d->rsp); + t4_an_handler(iq, &d->rsp); break; } @@ -2789,6 +2889,8 @@ alloc_fwq(struct adapter *sc) init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE); fwq->flags |= IQ_INTR; /* always */ intr_idx = sc->intr_count > 1 ? 
1 : 0; + fwq->set_tcb_rpl = t4_filter_rpl; + fwq->l2t_write_rpl = do_l2t_write_rpl; rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1); if (rc != 0) { device_printf(sc->dev, @@ -4674,10 +4776,10 @@ handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) const struct rss_header *rss2; rss2 = (const struct rss_header *)&cpl->data[0]; - return (sc->cpl_handler[rss2->opcode](iq, rss2, m)); + return (t4_cpl_handler[rss2->opcode](iq, rss2, m)); } - return (sc->fw_msg_handler[cpl->type](sc, &cpl->data[0])); + return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0])); } static int diff --git a/sys/dev/cxgbe/tom/t4_connect.c b/sys/dev/cxgbe/tom/t4_connect.c index c386a83fea09..f550723cb210 100644 --- a/sys/dev/cxgbe/tom/t4_connect.c +++ b/sys/dev/cxgbe/tom/t4_connect.c @@ -261,11 +261,11 @@ calc_opt2a(struct socket *so, struct toepcb *toep) } void -t4_init_connect_cpl_handlers(struct adapter *sc) +t4_init_connect_cpl_handlers(void) { - t4_register_cpl_handler(sc, CPL_ACT_ESTABLISH, do_act_establish); - t4_register_cpl_handler(sc, CPL_ACT_OPEN_RPL, do_act_open_rpl); + t4_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish); + t4_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl); } #define DONT_OFFLOAD_ACTIVE_OPEN(x) do { \ diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c index 387ee5148c13..f7ef499f4488 100644 --- a/sys/dev/cxgbe/tom/t4_cpl_io.c +++ b/sys/dev/cxgbe/tom/t4_cpl_io.c @@ -1679,7 +1679,7 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) return (0); } -static int +int do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; @@ -1693,9 +1693,7 @@ do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) KASSERT(opcode == CPL_SET_TCB_RPL, ("%s: unexpected opcode 0x%x", __func__, opcode)); KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); - - if (is_ftid(sc, tid)) - return (t4_filter_rpl(iq, rss, m)); /* TCB is a filter */ + MPASS(iq != &sc->sge.fwq); toep = lookup_tid(sc, tid); if (toep->ulp_mode == ULP_MODE_TCPDDP) { @@ -1720,47 +1718,26 @@ do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) } void -t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, int ctrl, - uint16_t word, uint64_t mask, uint64_t val) +t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, int tid, + uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie, int iqid) { struct wrqe *wr; struct cpl_set_tcb_field *req; - wr = alloc_wrqe(sizeof(*req), ctrl ? toep->ctrlq : toep->ofld_txq); + MPASS((cookie & ~M_COOKIE) == 0); + MPASS((iqid & ~M_QUEUENO) == 0); + + wr = alloc_wrqe(sizeof(*req), wrq); if (wr == NULL) { /* XXX */ panic("%s: allocation failure.", __func__); } req = wrtod(wr); - INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid); - req->reply_ctrl = htobe16(V_NO_REPLY(1) | - V_QUEUENO(toep->ofld_rxq->iq.abs_id)); - req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0)); - req->mask = htobe64(mask); - req->val = htobe64(val); - - t4_wrq_tx(sc, wr); -} - -void -t4_set_tcb_field_rpl(struct adapter *sc, struct toepcb *toep, int ctrl, - uint16_t word, uint64_t mask, uint64_t val, uint8_t cookie) -{ - struct wrqe *wr; - struct cpl_set_tcb_field *req; - - KASSERT((cookie & ~M_COOKIE) == 0, ("%s: invalid cookie %#x", __func__, - cookie)); - wr = alloc_wrqe(sizeof(*req), ctrl ? 
toep->ctrlq : toep->ofld_txq); - if (wr == NULL) { - /* XXX */ - panic("%s: allocation failure.", __func__); - } - req = wrtod(wr); - - INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid); - req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id)); + INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid); + req->reply_ctrl = htobe16(V_QUEUENO(iqid)); + if (reply == 0) + req->reply_ctrl |= htobe16(F_NO_REPLY); req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie)); req->mask = htobe64(mask); req->val = htobe64(val); @@ -1769,22 +1746,26 @@ t4_set_tcb_field_rpl(struct adapter *sc, struct toepcb *toep, int ctrl, } void -t4_init_cpl_io_handlers(struct adapter *sc) +t4_init_cpl_io_handlers(void) { - t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close); - t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl); - t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req); - t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl); - t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data); - t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack); - t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl); + t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close); + t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl); + t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req); + t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl); + t4_register_cpl_handler(CPL_RX_DATA, do_rx_data); + t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack); } void -t4_uninit_cpl_io_handlers(struct adapter *sc) +t4_uninit_cpl_io_handlers(void) { - t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl); + t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close); + t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl); + t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req); + t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl); + t4_register_cpl_handler(CPL_RX_DATA, do_rx_data); + t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack); } #endif diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c index bcc8d1d8f528..ff73c8bb3cad 100644 --- a/sys/dev/cxgbe/tom/t4_ddp.c +++ b/sys/dev/cxgbe/tom/t4_ddp.c @@ -786,6 +786,8 @@ handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt) F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\ F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR) +extern cpl_handler_t t4_cpl_handler[]; + static int do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { @@ -807,7 +809,7 @@ do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) } if (toep->ulp_mode == ULP_MODE_ISCSI) { - sc->cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m); + t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m); return (0); } @@ -848,13 +850,14 @@ enable_ddp(struct adapter *sc, struct toepcb *toep) DDP_ASSERT_LOCKED(toep); toep->ddp_flags |= DDP_SC_REQ; - t4_set_tcb_field(sc, toep, 1, W_TCB_RX_DDP_FLAGS, + t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_RX_DDP_FLAGS, V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) | V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) | V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1), - V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1)); - t4_set_tcb_field(sc, toep, 1, W_TCB_T_FLAGS, - V_TF_RCV_COALESCE_ENABLE(1), 0); + V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1), 0, 0, + toep->ofld_rxq->iq.abs_id); + t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_T_FLAGS, + V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0, toep->ofld_rxq->iq.abs_id); } static int @@ 
-1070,9 +1073,6 @@ t4_init_ddp(struct adapter *sc, struct tom_data *td) td->ppod_start = sc->vres.ddp.start; td->ppod_arena = vmem_create("DDP page pods", sc->vres.ddp.start, sc->vres.ddp.size, 1, 32, M_FIRSTFIT | M_NOWAIT); - - t4_register_cpl_handler(sc, CPL_RX_DATA_DDP, do_rx_data_ddp); - t4_register_cpl_handler(sc, CPL_RX_DDP_COMPLETE, do_rx_ddp_complete); } void @@ -1683,8 +1683,10 @@ t4_aio_cancel_active(struct kaiocb *job) */ valid_flag = i == 0 ? V_TF_DDP_BUF0_VALID(1) : V_TF_DDP_BUF1_VALID(1); - t4_set_tcb_field_rpl(sc, toep, 1, W_TCB_RX_DDP_FLAGS, - valid_flag, 0, i + DDP_BUF0_INVALIDATED); + t4_set_tcb_field(sc, toep->ctrlq, toep->tid, + W_TCB_RX_DDP_FLAGS, valid_flag, 0, 1, + i + DDP_BUF0_INVALIDATED, + toep->ofld_rxq->iq.abs_id); toep->db[i].cancel_pending = 1; CTR2(KTR_CXGBE, "%s: request %p marked pending", __func__, job); @@ -1756,6 +1758,8 @@ int t4_ddp_mod_load(void) { + t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp); + t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete); TAILQ_INIT(&ddp_orphan_pagesets); mtx_init(&ddp_orphan_pagesets_lock, "ddp orphans", NULL, MTX_DEF); TASK_INIT(&ddp_orphan_task, 0, ddp_free_orphan_pagesets, NULL); @@ -1769,5 +1773,7 @@ t4_ddp_mod_unload(void) taskqueue_drain(taskqueue_thread, &ddp_orphan_task); MPASS(TAILQ_EMPTY(&ddp_orphan_pagesets)); mtx_destroy(&ddp_orphan_pagesets_lock); + t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL); + t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL); } #endif diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c index 22f734a26e2e..4a1a0629897b 100644 --- a/sys/dev/cxgbe/tom/t4_listen.c +++ b/sys/dev/cxgbe/tom/t4_listen.c @@ -1589,12 +1589,12 @@ do_pass_establish(struct sge_iq *iq, const struct rss_header *rss, } void -t4_init_listen_cpl_handlers(struct adapter *sc) +t4_init_listen_cpl_handlers(void) { - t4_register_cpl_handler(sc, CPL_PASS_OPEN_RPL, do_pass_open_rpl); - t4_register_cpl_handler(sc, CPL_CLOSE_LISTSRV_RPL, do_close_server_rpl); - t4_register_cpl_handler(sc, CPL_PASS_ACCEPT_REQ, do_pass_accept_req); - t4_register_cpl_handler(sc, CPL_PASS_ESTABLISH, do_pass_establish); + t4_register_cpl_handler(CPL_PASS_OPEN_RPL, do_pass_open_rpl); + t4_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_close_server_rpl); + t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_pass_accept_req); + t4_register_cpl_handler(CPL_PASS_ESTABLISH, do_pass_establish); } #endif diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c index 452c47e5de96..2a8082e92865 100644 --- a/sys/dev/cxgbe/tom/t4_tom.c +++ b/sys/dev/cxgbe/tom/t4_tom.c @@ -381,8 +381,9 @@ t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name) switch (name) { case TCP_NODELAY: - t4_set_tcb_field(sc, toep, 1, W_TCB_T_FLAGS, V_TF_NAGLE(1), - V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 0 : 1)); + t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_T_FLAGS, + V_TF_NAGLE(1), V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 
0 : 1), + 0, 0, toep->ofld_rxq->iq.abs_id); break; default: break; @@ -930,8 +931,6 @@ free_tom_data(struct adapter *sc, struct tom_data *td) KASSERT(td->lctx_count == 0, ("%s: lctx hash table is not empty.", __func__)); - t4_uninit_l2t_cpl_handlers(sc); - t4_uninit_cpl_io_handlers(sc); t4_uninit_ddp(sc, td); destroy_clip_table(sc, td); @@ -997,7 +996,8 @@ t4_tom_activate(struct adapter *sc) struct tom_data *td; struct toedev *tod; struct vi_info *vi; - int i, rc, v; + struct sge_ofld_rxq *ofld_rxq; + int i, j, rc, v; ASSERT_SYNCHRONIZED_OP(sc); @@ -1031,12 +1031,6 @@ t4_tom_activate(struct adapter *sc) /* CLIP table for IPv6 offload */ init_clip_table(sc, td); - /* CPL handlers */ - t4_init_connect_cpl_handlers(sc); - t4_init_l2t_cpl_handlers(sc); - t4_init_listen_cpl_handlers(sc); - t4_init_cpl_io_handlers(sc); - /* toedev ops */ tod = &td->tod; init_toedev(tod); @@ -1059,6 +1053,10 @@ t4_tom_activate(struct adapter *sc) for_each_port(sc, i) { for_each_vi(sc->port[i], v, vi) { TOEDEV(vi->ifp) = &td->tod; + for_each_ofld_rxq(vi, j, ofld_rxq) { + ofld_rxq->iq.set_tcb_rpl = do_set_tcb_rpl; + ofld_rxq->iq.l2t_write_rpl = do_l2t_write_rpl2; + } } } @@ -1127,6 +1125,11 @@ t4_tom_mod_load(void) int rc; struct protosw *tcp_protosw, *tcp6_protosw; + /* CPL handlers */ + t4_init_connect_cpl_handlers(); + t4_init_listen_cpl_handlers(); + t4_init_cpl_io_handlers(); + rc = t4_ddp_mod_load(); if (rc != 0) return (rc); diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h index 09238a49b346..f114a3dfb429 100644 --- a/sys/dev/cxgbe/tom/t4_tom.h +++ b/sys/dev/cxgbe/tom/t4_tom.h @@ -294,13 +294,13 @@ struct clip_entry *hold_lip(struct tom_data *, struct in6_addr *); void release_lip(struct tom_data *, struct clip_entry *); /* t4_connect.c */ -void t4_init_connect_cpl_handlers(struct adapter *); +void t4_init_connect_cpl_handlers(void); int t4_connect(struct toedev *, struct socket *, struct rtentry *, struct sockaddr *); void act_open_failure_cleanup(struct adapter *, u_int, u_int); /* t4_listen.c */ -void t4_init_listen_cpl_handlers(struct adapter *); +void t4_init_listen_cpl_handlers(void); int t4_listen_start(struct toedev *, struct tcpcb *); int t4_listen_stop(struct toedev *, struct tcpcb *); void t4_syncache_added(struct toedev *, void *); @@ -313,8 +313,8 @@ int do_abort_rpl_synqe(struct sge_iq *, const struct rss_header *, void t4_offload_socket(struct toedev *, void *, struct socket *); /* t4_cpl_io.c */ -void t4_init_cpl_io_handlers(struct adapter *); -void t4_uninit_cpl_io_handlers(struct adapter *); +void t4_init_cpl_io_handlers(void); +void t4_uninit_cpl_io_handlers(void); void send_abort_rpl(struct adapter *, struct sge_wrq *, int , int); void send_flowc_wr(struct toepcb *, struct flowc_tx_params *); void send_reset(struct adapter *, struct toepcb *, uint32_t); @@ -324,12 +324,11 @@ void t4_rcvd_locked(struct toedev *, struct tcpcb *); int t4_tod_output(struct toedev *, struct tcpcb *); int t4_send_fin(struct toedev *, struct tcpcb *); int t4_send_rst(struct toedev *, struct tcpcb *); -void t4_set_tcb_field(struct adapter *, struct toepcb *, int, uint16_t, - uint64_t, uint64_t); -void t4_set_tcb_field_rpl(struct adapter *, struct toepcb *, int, uint16_t, - uint64_t, uint64_t, uint8_t); +void t4_set_tcb_field(struct adapter *, struct sge_wrq *, int, uint16_t, + uint64_t, uint64_t, int, int, int); void t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop); void t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop); +int do_set_tcb_rpl(struct sge_iq *, const 
struct rss_header *, struct mbuf *); /* t4_ddp.c */ void t4_init_ddp(struct adapter *, struct tom_data *); diff --git a/sys/dev/cxgbe/tom/t4_tom_l2t.c b/sys/dev/cxgbe/tom/t4_tom_l2t.c index 8aadf34a9b5e..f89df02e1000 100644 --- a/sys/dev/cxgbe/tom/t4_tom_l2t.c +++ b/sys/dev/cxgbe/tom/t4_tom_l2t.c @@ -219,7 +219,7 @@ update_entry(struct adapter *sc, struct l2t_entry *e, uint8_t *lladdr, memcpy(e->dmac, lladdr, ETHER_ADDR_LEN); e->vlan = vtag; - t4_write_l2e(sc, e, 1); + t4_write_l2e(e, 1); } e->state = L2T_STATE_VALID; } @@ -309,19 +309,7 @@ t4_l2t_send_slow(struct adapter *sc, struct wrqe *wr, struct l2t_entry *e) return (0); } -/* - * Called when an L2T entry has no more users. The entry is left in the hash - * table since it is likely to be reused but we also bump nfree to indicate - * that the entry can be reallocated for a different neighbor. We also drop - * the existing neighbor reference in case the neighbor is going away and is - * waiting on our reference. - * - * Because entries can be reallocated to other neighbors once their ref count - * drops to 0 we need to take the entry's lock to avoid races with a new - * incarnation. - */ - -static int +int do_l2t_write_rpl2(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { @@ -329,11 +317,13 @@ do_l2t_write_rpl2(struct sge_iq *iq, const struct rss_header *rss, const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1); unsigned int tid = GET_TID(rpl); unsigned int idx = tid % L2T_SIZE; - int rc; - rc = do_l2t_write_rpl(iq, rss, m); - if (rc != 0) - return (rc); + if (__predict_false(rpl->status != CPL_ERR_NONE)) { + log(LOG_ERR, + "Unexpected L2T_WRITE_RPL (%u) for entry at hw_idx %u\n", + rpl->status, idx); + return (EINVAL); + } if (tid & F_SYNC_WR) { struct l2t_entry *e = &sc->l2t->l2tab[idx - sc->vres.l2t.start]; @@ -349,20 +339,6 @@ do_l2t_write_rpl2(struct sge_iq *iq, const struct rss_header *rss, return (0); } -void -t4_init_l2t_cpl_handlers(struct adapter *sc) -{ - - t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl2); -} - -void -t4_uninit_l2t_cpl_handlers(struct adapter *sc) -{ - - t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl); -} - /* * The TOE wants an L2 table entry that it can use to reach the next hop over * the specified port. Produce such an entry - create one if needed. 
@@ -374,7 +350,8 @@ struct l2t_entry * t4_l2t_get(struct port_info *pi, struct ifnet *ifp, struct sockaddr *sa) { struct l2t_entry *e; - struct l2t_data *d = pi->adapter->l2t; + struct adapter *sc = pi->adapter; + struct l2t_data *d = sc->l2t; u_int hash, smt_idx = pi->port_id; KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6, @@ -409,6 +386,8 @@ t4_l2t_get(struct port_info *pi, struct ifnet *ifp, struct sockaddr *sa) e->smt_idx = smt_idx; e->hash = hash; e->lport = pi->lport; + e->wrq = &sc->sge.ctrlq[pi->port_id]; + e->iqid = sc->sge.ofld_rxq[pi->vi[0].first_ofld_rxq].iq.abs_id; atomic_store_rel_int(&e->refcnt, 1); #ifdef VLAN_TAG if (ifp->if_type == IFT_L2VLAN) diff --git a/sys/dev/cxgbe/tom/t4_tom_l2t.h b/sys/dev/cxgbe/tom/t4_tom_l2t.h index 3d76735707e3..9a7fb39fbd79 100644 --- a/sys/dev/cxgbe/tom/t4_tom_l2t.h +++ b/sys/dev/cxgbe/tom/t4_tom_l2t.h @@ -37,8 +37,8 @@ struct l2t_entry *t4_l2t_get(struct port_info *, struct ifnet *, struct sockaddr *); void t4_l2_update(struct toedev *, struct ifnet *, struct sockaddr *, uint8_t *, uint16_t); -void t4_init_l2t_cpl_handlers(struct adapter *); -void t4_uninit_l2t_cpl_handlers(struct adapter *); +int do_l2t_write_rpl2(struct sge_iq *, const struct rss_header *, + struct mbuf *); static inline int t4_l2t_send(struct adapter *sc, struct wrqe *wr, struct l2t_entry *e)
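
The dispatch scheme described in (c) above boils down to a two-level lookup: one
global table maps a CPL opcode to a handler, and for the two "shared" opcodes that
handler only forwards to a hook stored in the receiving ingress queue.  The
stand-alone sketch below illustrates that shape; it is not driver code, and the
types, the opcode value, and the handler bodies are simplified stand-ins for the
real struct sge_iq, cpl_handler_t, t4_register_cpl_handler(), t4_filter_rpl() and
do_set_tcb_rpl().

#include <stddef.h>
#include <stdio.h>

#define NCPL		256	/* stand-in for NUM_CPL_CMDS */
#define CPL_SET_TCB_RPL	0x3a	/* opcode value is illustrative only */

struct rss_header { unsigned char opcode; };
struct mbuf;		/* payload type, never dereferenced here */
struct sge_iq;

typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
    struct mbuf *);

/* Per-iq hook: the same opcode can mean different things on different iqs. */
struct sge_iq {
	const char *name;
	cpl_handler_t set_tcb_rpl;
};

/* One global table, filled in at MOD_LOAD time; no per-adapter copies. */
static cpl_handler_t cpl_handler[NCPL];

static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	(void)m;
	printf("%s: unexpected opcode 0x%02x on iq %s\n", __func__,
	    rss->opcode, iq->name);
	return (-1);
}

static void
register_cpl_handler(int opcode, cpl_handler_t h)
{
	/* NULL restores the default, as in the real registration function. */
	cpl_handler[opcode] = h != NULL ? h : cpl_not_handled;
}

/*
 * Handler registered for the shared opcode: it only forwards to the hook of
 * the iq the reply arrived on.  fwq and the offload rxqs install different
 * hooks, so the same opcode is interpreted per destination.
 */
static int
set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	return (iq->set_tcb_rpl(iq, rss, m));
}

static int
filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	(void)rss; (void)m;
	printf("filter-tid reply handled on %s\n", iq->name);
	return (0);
}

static int
toe_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	(void)rss; (void)m;
	printf("TOE-tid reply handled on %s\n", iq->name);
	return (0);
}

int
main(void)
{
	struct sge_iq fwq = { "fwq", filter_rpl };
	struct sge_iq ofld_rxq = { "ofld_rxq0", toe_tcb_rpl };
	struct rss_header rss = { CPL_SET_TCB_RPL };
	int i;

	for (i = 0; i < NCPL; i++)
		register_cpl_handler(i, NULL);		/* install defaults */
	register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl);

	/* Same opcode, different handling, decided by the receiving iq. */
	cpl_handler[rss.opcode](&fwq, &rss, NULL);
	cpl_handler[rss.opcode](&ofld_rxq, &rss, NULL);
	return (0);
}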