cxgbe(4): Add functions to help synchronize "slow" operations (those not
on the fast data path) and use them instead of frobbing the adapter lock
and busy flag directly.

Other changes made while reworking all slow operations:
- Wait for the reply to a filter request (add/delete).  This guarantees
  that the operation is complete by the time the ioctl returns.
- Tidy up the tid_info structure.
- Do not allow the tx queue size to be set to something that's not a
  power of 2.

MFC after:	1 week
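
A rough sketch of the caller-side pattern these helpers establish, pieced together from the prototypes and the tom_uninit() hunk further down; the function name, the "t4exmp" wait string, and the zero flags argument to end_synchronized_op() are illustrative assumptions, not the driver's code:

/*
 * Hypothetical slow-path operation.  Assumes the driver's adapter.h for
 * struct adapter, struct port_info, and the SLEEP_OK/INTR_OK flags.
 */
static int
example_slow_op(struct adapter *sc, struct port_info *pi)
{
        int rc;

        /*
         * Mark the adapter busy; the flag names suggest the wait may sleep
         * and may be interrupted by a signal.
         */
        rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4exmp");
        if (rc != 0)
                return (rc);    /* interrupted, or adapter/port going away */

        /* ... the slow operation itself; sleeping is allowed here ... */

        end_synchronized_op(sc, 0);     /* 0: the adapter lock is not held */
        return (0);
}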
Navdeep Parhar 2013-01-10 23:56:50 +00:00
parent bfd6a1d202
commit b174b65819
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=245274
4 changed files with 395 additions and 280 deletions


@@ -157,6 +157,16 @@ enum {
INTR_MSIX = (1 << 2)
};
+enum {
+/* flags understood by begin_synchronized_op */
+HOLD_LOCK = (1 << 0),
+SLEEP_OK = (1 << 1),
+INTR_OK = (1 << 2),
+/* flags understood by end_synchronized_op */
+LOCK_HELD = HOLD_LOCK,
+};
enum {
/* adapter flags */
FULL_INIT_DONE = (1 << 0),
@@ -174,11 +184,11 @@ enum {
PORT_SYSCTL_CTX = (1 << 2),
};
-#define IS_DOOMED(pi) (pi->flags & DOOMED)
-#define SET_DOOMED(pi) do {pi->flags |= DOOMED;} while (0)
-#define IS_BUSY(sc) (sc->flags & CXGBE_BUSY)
-#define SET_BUSY(sc) do {sc->flags |= CXGBE_BUSY;} while (0)
-#define CLR_BUSY(sc) do {sc->flags &= ~CXGBE_BUSY;} while (0)
+#define IS_DOOMED(pi) ((pi)->flags & DOOMED)
+#define SET_DOOMED(pi) do {(pi)->flags |= DOOMED;} while (0)
+#define IS_BUSY(sc) ((sc)->flags & CXGBE_BUSY)
+#define SET_BUSY(sc) do {(sc)->flags |= CXGBE_BUSY;} while (0)
+#define CLR_BUSY(sc) do {(sc)->flags &= ~CXGBE_BUSY;} while (0)
struct port_info {
device_t dev;
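
The only functional change in the hunk above is that each macro parameter is now wrapped in parentheses where it is used. A contrived, self-contained illustration of what the unparenthesized form can get wrong (the struct and program are hypothetical, not driver code):

#include <stdio.h>

#define DOOMED                  (1 << 0)
#define IS_DOOMED_OLD(pi)       (pi->flags & DOOMED)      /* old style */
#define IS_DOOMED_NEW(pi)       ((pi)->flags & DOOMED)    /* new style */

struct port {
        int flags;
};

int
main(void)
{
        struct port ports[2] = { { 0 }, { DOOMED } };
        struct port *p = ports;

        /*
         * IS_DOOMED_OLD(p + 1) expands to (p + 1->flags & DOOMED), which
         * does not even compile; the parenthesized version expands to
         * ((p + 1)->flags & DOOMED) and tests the intended element.
         */
        printf("%d\n", IS_DOOMED_NEW(p + 1) != 0);        /* prints 1 */
        return (0);
}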
@@ -591,6 +601,11 @@ struct adapter {
an_handler_t an_handler __aligned(CACHE_LINE_SIZE);
fw_msg_handler_t fw_msg_handler[4]; /* NUM_FW6_TYPES */
cpl_handler_t cpl_handler[0xef]; /* NUM_CPL_CMDS */
+#ifdef INVARIANTS
+const char *last_op;
+const void *last_op_thr;
+#endif
};
#define ADAPTER_LOCK(sc) mtx_lock(&(sc)->sc_lock)
@@ -598,6 +613,12 @@ struct adapter {
#define ADAPTER_LOCK_ASSERT_OWNED(sc) mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)
+/* XXX: not bulletproof, but much better than nothing */
+#define ASSERT_SYNCHRONIZED_OP(sc) \
+KASSERT(IS_BUSY(sc) && \
+(mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
+("%s: operation not synchronized.", __func__))
#define PORT_LOCK(pi) mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi) mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi) mtx_assert(&(pi)->pi_lock, MA_OWNED)
@@ -751,6 +772,8 @@ int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t);
int t4_register_an_handler(struct adapter *, an_handler_t);
int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t);
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
+int begin_synchronized_op(struct adapter *, struct port_info *, int, char *);
+void end_synchronized_op(struct adapter *, int);
/* t4_sge.c */
void t4_sge_modload(void);


@@ -75,29 +75,27 @@ union aopen_entry {
*/
struct tid_info {
void **tid_tab;
-unsigned int ntids;
+u_int ntids;
+u_int tids_in_use;
+struct mtx stid_lock __aligned(CACHE_LINE_SIZE);
union serv_entry *stid_tab;
-unsigned int nstids;
-unsigned int stid_base;
-union aopen_entry *atid_tab;
-unsigned int natids;
-struct filter_entry *ftid_tab;
-unsigned int nftids;
-unsigned int ftid_base;
-unsigned int ftids_in_use;
-struct mtx atid_lock;
-union aopen_entry *afree;
-unsigned int atids_in_use;
-struct mtx stid_lock;
+u_int nstids;
+u_int stid_base;
union serv_entry *sfree;
-unsigned int stids_in_use;
+u_int stids_in_use;
-unsigned int tids_in_use;
+struct mtx atid_lock __aligned(CACHE_LINE_SIZE);
+union aopen_entry *atid_tab;
+u_int natids;
+union aopen_entry *afree;
+u_int atids_in_use;
+struct mtx ftid_lock __aligned(CACHE_LINE_SIZE);
+struct filter_entry *ftid_tab;
+u_int nftids;
+u_int ftid_base;
+u_int ftids_in_use;
};
struct t4_range {
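
The reorganization above appears to group each lock with the fields it protects, and the __aligned(CACHE_LINE_SIZE) on each mtx presumably keeps the stid, atid, and ftid regions from sharing cache lines. A sketch of how one such region would be used; the helper and its policy are assumptions built on the struct shown above and the driver header that declares it, not actual driver code:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/errno.h>

/*
 * Hypothetical helper: reserve one filter TID slot.  All fields in the
 * ftid_lock region (ftid_tab, nftids, ftid_base, ftids_in_use) are touched
 * only while ftid_lock is held.
 */
static int
reserve_ftid(struct tid_info *t)
{
        int rc = 0;

        mtx_lock(&t->ftid_lock);
        if (t->ftids_in_use == t->nftids)
                rc = ENOMEM;            /* filter table is full */
        else
                t->ftids_in_use++;
        mtx_unlock(&t->ftid_lock);

        return (rc);
}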

File diff suppressed because it is too large.


@@ -602,7 +602,7 @@ t4_tom_activate(struct adapter *sc)
struct toedev *tod;
int i, rc;
-ADAPTER_LOCK_ASSERT_OWNED(sc); /* for sc->flags */
+ASSERT_SYNCHRONIZED_OP(sc);
/* per-adapter softc for TOM */
td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
@@ -668,7 +668,7 @@ t4_tom_deactivate(struct adapter *sc)
int rc = 0;
struct tom_data *td = sc->tom_softc;
-ADAPTER_LOCK_ASSERT_OWNED(sc); /* for sc->flags */
+ASSERT_SYNCHRONIZED_OP(sc);
if (td == NULL)
return (0); /* XXX. KASSERT? */
@@ -721,11 +721,14 @@ t4_tom_mod_load(void)
static void
tom_uninit(struct adapter *sc, void *arg __unused)
{
+if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4tomun"))
+return;
/* Try to free resources (works only if no port has IFCAP_TOE) */
-ADAPTER_LOCK(sc);
if (sc->flags & TOM_INIT_DONE)
t4_deactivate_uld(sc, ULD_TOM);
-ADAPTER_UNLOCK(sc);
+end_synchronized_op(sc, LOCK_HELD);
}
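
Judging by the flag names and the LOCK_HELD = HOLD_LOCK alias in the new enum, begin_synchronized_op() called with HOLD_LOCK appears to return with the adapter lock held, which tom_uninit() wants because it goes on to test sc->flags; passing LOCK_HELD to end_synchronized_op() then tells it the caller still owns that lock. That would explain why the explicit ADAPTER_LOCK()/ADAPTER_UNLOCK() pair can simply be dropped here.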
static int