diff --git a/bin/csh/csh.login b/bin/csh/csh.login index 283c6d4fddbc..b55ae087590a 100644 --- a/bin/csh/csh.login +++ b/bin/csh/csh.login @@ -1,9 +1,6 @@ # $FreeBSD$ # # System-wide .login file for csh(1). -# Uncomment this to give you the default 4.2 behavior, where disk -# information is shown in K-Blocks -# setenv BLOCKSIZE K # # For the setting of languages and character sets please see # login.conf(5) and in particular the charset and lang options. diff --git a/bin/csh/dot.cshrc b/bin/csh/dot.cshrc index 1fcc9917f0f9..f7a9a3ed0226 100644 --- a/bin/csh/dot.cshrc +++ b/bin/csh/dot.cshrc @@ -19,7 +19,6 @@ set path = (/sbin /bin /usr/sbin /usr/bin /usr/local/sbin /usr/local/bin $HOME/b setenv EDITOR vi setenv PAGER less -setenv BLOCKSIZE K if ($?prompt) then # An interactive shell -- set some stuff up diff --git a/bin/sh/histedit.c b/bin/sh/histedit.c index 8e2f927cfe06..62f5a89a05e1 100644 --- a/bin/sh/histedit.c +++ b/bin/sh/histedit.c @@ -67,7 +67,7 @@ __FBSDID("$FreeBSD$"); History *hist; /* history cookie */ EditLine *el; /* editline cookie */ int displayhist; -static FILE *el_in, *el_out, *el_err; +static FILE *el_in, *el_out; static char *fc_replace(const char *, char *, char *); static int not_fcnumber(const char *); @@ -106,18 +106,16 @@ histedit(void) INTOFF; if (el_in == NULL) el_in = fdopen(0, "r"); - if (el_err == NULL) - el_err = fdopen(1, "w"); if (el_out == NULL) el_out = fdopen(2, "w"); - if (el_in == NULL || el_err == NULL || el_out == NULL) + if (el_in == NULL || el_out == NULL) goto bad; term = lookupvar("TERM"); if (term) setenv("TERM", term, 1); else unsetenv("TERM"); - el = el_init(arg0, el_in, el_out, el_err); + el = el_init(arg0, el_in, el_out, el_out); if (el != NULL) { if (hist) el_set(el, EL_HIST, history, hist); diff --git a/bin/sh/profile b/bin/sh/profile index 23c993c40214..b77c8787c79d 100644 --- a/bin/sh/profile +++ b/bin/sh/profile @@ -2,10 +2,6 @@ # # System-wide .profile file for sh(1). # -# Uncomment this to give you the default 4.2 behavior, where disk -# information is shown in K-Blocks -# BLOCKSIZE=K; export BLOCKSIZE -# # For the setting of languages and character sets please see # login.conf(5) and in particular the charset and lang options. 
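The histedit.c hunk above drops sh(1)'s separate fdopen()ed error stream and simply hands the shell's output stream to el_init() twice. A minimal, illustrative libedit sketch of that pattern follows; init_el is a hypothetical helper for illustration only and is not part of this patch:

	#include <stdio.h>
	#include <histedit.h>

	/* Initialize libedit with the error stream aliased to the output stream. */
	static EditLine *
	init_el(const char *progname, FILE *in, FILE *out)
	{
		EditLine *el;

		el = el_init(progname, in, out, out);	/* errors share 'out' */
		if (el != NULL)
			el_set(el, EL_EDITOR, "emacs");	/* pick a line editor */
		return (el);
	}
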
# For full locales list check /usr/share/locale/* diff --git a/lib/libcapsicum/capsicum_helpers.3 b/lib/libcapsicum/capsicum_helpers.3 index 8d4b875bf94c..e3d3cd7e9abb 100644 --- a/lib/libcapsicum/capsicum_helpers.3 +++ b/lib/libcapsicum/capsicum_helpers.3 @@ -24,7 +24,7 @@ .\" .\" $FreeBSD$ .\" -.Dd November 4, 2018 +.Dd January 21, 2019 .Dt CAPSICUM_HELPERS 3 .Os .Sh NAME @@ -50,11 +50,13 @@ .Ft int .Fn caph_enter_casper "void" .Ft int -.Fn caph_rights_limit "inf fd" "const cap_righst_t *rights" +.Fn caph_rights_limit "int fd" "const cap_righst_t *rights" .Ft int -.Fn caph_ioctls_limit "inf fd" "const unsigned long *cmds" "size_t ncmds" +.Fn caph_ioctls_limit "int fd" "const unsigned long *cmds" "size_t ncmds" .Ft int -.Fn caph_fcntls_limit "inf fd" "uint32_t fcntlrights" +.Fn caph_fcntls_limit "int fd" "uint32_t fcntlrights" +.Ft int +.Fn caph_limit_stream "int fd" "int flags" .Ft int .Fn caph_limit_stdin "void" .Ft int diff --git a/sbin/devd/devd.conf b/sbin/devd/devd.conf index 8a6151cfd899..572e53714268 100644 --- a/sbin/devd/devd.conf +++ b/sbin/devd/devd.conf @@ -23,8 +23,8 @@ options { esp|ida|iir|ips|isp|mlx|mly|mpr|mps|mpt|sym|trm)\ [0-9]+"; set wifi-driver-regex - "(ath|bwi|bwn|ipw|iwi|iwm|iwn|malo|mwl|ral|rsu|rtwn|rum|run|\ - uath|upgt|ural|urtw|wi|wpi|wtap|zyd)[0-9]+"; + "(ath|bwi|bwn|ipw|iwi|iwm|iwn|malo|mwl|otus|ral|rsu|rtwn|rum|\ + run|uath|upgt|ural|urtw|wi|wpi|wtap|zyd)[0-9]+"; }; # Note that the attach/detach with the highest value wins, so that one can @@ -43,7 +43,7 @@ options { # notify 0 { match "system" "IFNET"; - match "subsystem" "!usbus[0-9]+"; + match "subsystem" "(?!usbus[0-9]+|?!wlan[0-9]+)"; match "type" "ATTACH"; action "/etc/pccard_ether $subsystem start"; }; diff --git a/sbin/ifconfig/ifieee80211.c b/sbin/ifconfig/ifieee80211.c index 63347ed805e8..2e648d8feb48 100644 --- a/sbin/ifconfig/ifieee80211.c +++ b/sbin/ifconfig/ifieee80211.c @@ -77,6 +77,7 @@ #include #include #include +#include #include #include @@ -3129,13 +3130,6 @@ printrsnie(const char *tag, const u_int8_t *ie, size_t ielen, int maxlen) } } -/* XXX move to a public include file */ -#define IEEE80211_WPS_DEV_PASS_ID 0x1012 -#define IEEE80211_WPS_SELECTED_REG 0x1041 -#define IEEE80211_WPS_SETUP_STATE 0x1044 -#define IEEE80211_WPS_UUID_E 0x1047 -#define IEEE80211_WPS_VERSION 0x104a - #define BE_READ_2(p) \ ((u_int16_t) \ ((((const u_int8_t *)(p))[1] ) | \ @@ -3157,6 +3151,7 @@ printwpsie(const char *tag, const u_int8_t *ie, size_t ielen, int maxlen) "R" /* Registrar-specified */ }; int n; + int f; ie +=6, len -= 4; /* NB: len is payload only */ @@ -3165,6 +3160,7 @@ printwpsie(const char *tag, const u_int8_t *ie, size_t ielen, int maxlen) while (len) { uint16_t tlv_type = BE_READ_2(ie); uint16_t tlv_len = BE_READ_2(ie + 2); + uint16_t cfg_mthd; /* some devices broadcast invalid WPS frames */ if (tlv_len > len) { @@ -3177,30 +3173,191 @@ printwpsie(const char *tag, const u_int8_t *ie, size_t ielen, int maxlen) ie += 4, len -= 4; switch (tlv_type) { - case IEEE80211_WPS_VERSION: + case IEEE80211_WPS_ATTR_VERSION: printf("v:%d.%d", *ie >> 4, *ie & 0xf); break; - case IEEE80211_WPS_SETUP_STATE: - /* Only 1 and 2 are valid */ - if (*ie == 0 || *ie >= 3) - printf(" state:B"); + case IEEE80211_WPS_ATTR_AP_SETUP_LOCKED: + printf(" ap_setup:%s", *ie ? 
"locked" : + "unlocked"); + break; + case IEEE80211_WPS_ATTR_CONFIG_METHODS: + case IEEE80211_WPS_ATTR_SELECTED_REGISTRAR_CONFIG_METHODS: + if (tlv_type == IEEE80211_WPS_ATTR_SELECTED_REGISTRAR_CONFIG_METHODS) + printf(" sel_reg_cfg_mthd:"); else - printf(" st:%s", *ie == 1 ? "N" : "C"); + printf(" cfg_mthd:" ); + cfg_mthd = BE_READ_2(ie); + f = 0; + for (n = 15; n >= 0; n--) { + if (f) { + printf(","); + f = 0; + } + switch (cfg_mthd & (1 << n)) { + case 0: + break; + case IEEE80211_WPS_CONFIG_USBA: + printf("usba"); + f++; + break; + case IEEE80211_WPS_CONFIG_ETHERNET: + printf("ethernet"); + f++; + break; + case IEEE80211_WPS_CONFIG_LABEL: + printf("label"); + f++; + break; + case IEEE80211_WPS_CONFIG_DISPLAY: + if (!(cfg_mthd & + (IEEE80211_WPS_CONFIG_VIRT_DISPLAY | + IEEE80211_WPS_CONFIG_PHY_DISPLAY))) + { + printf("display"); + f++; + } + break; + case IEEE80211_WPS_CONFIG_EXT_NFC_TOKEN: + printf("ext_nfc_tokenk"); + f++; + break; + case IEEE80211_WPS_CONFIG_INT_NFC_TOKEN: + printf("int_nfc_token"); + f++; + break; + case IEEE80211_WPS_CONFIG_NFC_INTERFACE: + printf("nfc_interface"); + f++; + break; + case IEEE80211_WPS_CONFIG_PUSHBUTTON: + if (!(cfg_mthd & + (IEEE80211_WPS_CONFIG_VIRT_PUSHBUTTON | + IEEE80211_WPS_CONFIG_PHY_PUSHBUTTON))) { + printf("push_button"); + f++; + } + break; + case IEEE80211_WPS_CONFIG_KEYPAD: + printf("keypad"); + f++; + break; + case IEEE80211_WPS_CONFIG_VIRT_PUSHBUTTON: + printf("virtual_push_button"); + f++; + break; + case IEEE80211_WPS_CONFIG_PHY_PUSHBUTTON: + printf("physical_push_button"); + f++; + break; + case IEEE80211_WPS_CONFIG_P2PS: + printf("p2ps"); + f++; + break; + case IEEE80211_WPS_CONFIG_VIRT_DISPLAY: + printf("virtual_display"); + f++; + break; + case IEEE80211_WPS_CONFIG_PHY_DISPLAY: + printf("physical_display"); + f++; + break; + default: + printf("unknown_wps_config<%04x>", + cfg_mthd & (1 << n)); + f++; + break; + } + } break; - case IEEE80211_WPS_SELECTED_REG: - printf(" sel:%s", *ie ? "T" : "F"); + case IEEE80211_WPS_ATTR_DEV_NAME: + printf(" device_name:<%.*s>", tlv_len, ie); break; - case IEEE80211_WPS_DEV_PASS_ID: + case IEEE80211_WPS_ATTR_DEV_PASSWORD_ID: n = LE_READ_2(ie); if (n < nitems(dev_pass_id)) printf(" dpi:%s", dev_pass_id[n]); break; - case IEEE80211_WPS_UUID_E: + case IEEE80211_WPS_ATTR_MANUFACTURER: + printf(" manufacturer:<%.*s>", tlv_len, ie); + break; + case IEEE80211_WPS_ATTR_MODEL_NAME: + printf(" model_name:<%.*s>", tlv_len, ie); + break; + case IEEE80211_WPS_ATTR_MODEL_NUMBER: + printf(" model_number:<%.*s>", tlv_len, ie); + break; + case IEEE80211_WPS_ATTR_PRIMARY_DEV_TYPE: + printf(" prim_dev:"); + for (n = 0; n < tlv_len; n++) + printf("%02x", ie[n]); + break; + case IEEE80211_WPS_ATTR_RF_BANDS: + printf(" rf:"); + f = 0; + for (n = 7; n >= 0; n--) { + if (f) { + printf(","); + f = 0; + } + switch (*ie & (1 << n)) { + case 0: + break; + case IEEE80211_WPS_RF_BAND_24GHZ: + printf("2.4Ghz"); + f++; + break; + case IEEE80211_WPS_RF_BAND_50GHZ: + printf("5Ghz"); + f++; + break; + case IEEE80211_WPS_RF_BAND_600GHZ: + printf("60Ghz"); + f++; + break; + default: + printf("unknown<%02x>", + *ie & (1 << n)); + f++; + break; + } + } + break; + case IEEE80211_WPS_ATTR_RESPONSE_TYPE: + printf(" resp_type:0x%02x", *ie); + break; + case IEEE80211_WPS_ATTR_SELECTED_REGISTRAR: + printf(" sel:%s", *ie ? 
"T" : "F"); + break; + case IEEE80211_WPS_ATTR_SERIAL_NUMBER: + printf(" serial_number:<%.*s>", tlv_len, ie); + break; + case IEEE80211_WPS_ATTR_UUID_E: printf(" uuid-e:"); for (n = 0; n < (tlv_len - 1); n++) printf("%02x-", ie[n]); printf("%02x", ie[n]); break; + case IEEE80211_WPS_ATTR_VENDOR_EXT: + printf(" vendor:"); + for (n = 0; n < tlv_len; n++) + printf("%02x", ie[n]); + break; + case IEEE80211_WPS_ATTR_WPS_STATE: + switch (*ie) { + case IEEE80211_WPS_STATE_NOT_CONFIGURED: + printf(" state:N"); + break; + case IEEE80211_WPS_STATE_CONFIGURED: + printf(" state:C"); + break; + default: + printf(" state:B<%02x>", *ie); + break; + } + break; + default: + printf(" unknown_wps_attr:0x%x", tlv_type); + break; } ie += tlv_len, len -= tlv_len; } @@ -3353,6 +3510,7 @@ iswpsoui(const uint8_t *frm) static const char * iename(int elemid) { + static char iename_buf[64]; switch (elemid) { case IEEE80211_ELEMID_FHPARMS: return " FHPARMS"; case IEEE80211_ELEMID_CFPARMS: return " CFPARMS"; @@ -3370,10 +3528,21 @@ iename(int elemid) case IEEE80211_ELEMID_MEASREP: return " MEASREP"; case IEEE80211_ELEMID_QUIET: return " QUIET"; case IEEE80211_ELEMID_IBSSDFS: return " IBSSDFS"; + case IEEE80211_ELEMID_RESERVED_47: + return " RESERVED_47"; + case IEEE80211_ELEMID_MOBILITY_DOMAIN: + return " MOBILITY_DOMAIN"; + case IEEE80211_ELEMID_RRM_ENACAPS: + return " RRM_ENCAPS"; + case IEEE80211_ELEMID_OVERLAP_BSS_SCAN_PARAM: + return " OVERLAP_BSS"; case IEEE80211_ELEMID_TPC: return " TPC"; case IEEE80211_ELEMID_CCKM: return " CCKM"; + case IEEE80211_ELEMID_EXTCAP: return " EXTCAP"; } - return " ???"; + snprintf(iename_buf, sizeof(iename_buf), " UNKNOWN_ELEMID_%d", + elemid); + return (const char *) iename_buf; } static void diff --git a/sbin/pfctl/pfctl.h b/sbin/pfctl/pfctl.h index a432f109726a..f43b71e19fec 100644 --- a/sbin/pfctl/pfctl.h +++ b/sbin/pfctl/pfctl.h @@ -114,7 +114,6 @@ extern int loadopt; int check_commit_altq(int, int); void pfaltq_store(struct pf_altq *); -struct pf_altq *pfaltq_lookup(const char *); char *rate2str(double); void print_addr(struct pf_addr_wrap *, sa_family_t, int); diff --git a/sbin/pfctl/pfctl_altq.c b/sbin/pfctl/pfctl_altq.c index e9218e0b2d65..e5dad956f83f 100644 --- a/sbin/pfctl/pfctl_altq.c +++ b/sbin/pfctl/pfctl_altq.c @@ -24,6 +24,7 @@ __FBSDID("$FreeBSD$"); #define PFIOC_USE_LATEST #include +#include #include #include @@ -36,6 +37,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include @@ -53,38 +55,44 @@ __FBSDID("$FreeBSD$"); #define is_sc_null(sc) (((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0)) -static TAILQ_HEAD(altqs, pf_altq) altqs = TAILQ_HEAD_INITIALIZER(altqs); -static LIST_HEAD(gen_sc, segment) rtsc, lssc; +static STAILQ_HEAD(interfaces, pfctl_altq) interfaces = STAILQ_HEAD_INITIALIZER(interfaces); +static struct hsearch_data queue_map; +static struct hsearch_data if_map; +static struct hsearch_data qid_map; -struct pf_altq *qname_to_pfaltq(const char *, const char *); -u_int32_t qname_to_qid(const char *); +static struct pfctl_altq *pfaltq_lookup(char *ifname); +static struct pfctl_altq *qname_to_pfaltq(const char *, const char *); +static u_int32_t qname_to_qid(char *); -static int eval_pfqueue_cbq(struct pfctl *, struct pf_altq *); +static int eval_pfqueue_cbq(struct pfctl *, struct pf_altq *, + struct pfctl_altq *); static int cbq_compute_idletime(struct pfctl *, struct pf_altq *); -static int check_commit_cbq(int, int, struct pf_altq *); +static int check_commit_cbq(int, int, struct pfctl_altq *); static int 
print_cbq_opts(const struct pf_altq *); static int print_codel_opts(const struct pf_altq *, const struct node_queue_opt *); -static int eval_pfqueue_priq(struct pfctl *, struct pf_altq *); -static int check_commit_priq(int, int, struct pf_altq *); +static int eval_pfqueue_priq(struct pfctl *, struct pf_altq *, + struct pfctl_altq *); +static int check_commit_priq(int, int, struct pfctl_altq *); static int print_priq_opts(const struct pf_altq *); -static int eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *); -static int check_commit_hfsc(int, int, struct pf_altq *); +static int eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *, + struct pfctl_altq *, struct pfctl_altq *); +static int check_commit_hfsc(int, int, struct pfctl_altq *); static int print_hfsc_opts(const struct pf_altq *, const struct node_queue_opt *); -static int eval_pfqueue_fairq(struct pfctl *, struct pf_altq *); +static int eval_pfqueue_fairq(struct pfctl *, struct pf_altq *, + struct pfctl_altq *, struct pfctl_altq *); static int print_fairq_opts(const struct pf_altq *, const struct node_queue_opt *); -static int check_commit_fairq(int, int, struct pf_altq *); +static int check_commit_fairq(int, int, struct pfctl_altq *); static void gsc_add_sc(struct gen_sc *, struct service_curve *); static int is_gsc_under_sc(struct gen_sc *, struct service_curve *); -static void gsc_destroy(struct gen_sc *); static struct segment *gsc_getentry(struct gen_sc *, double); static int gsc_add_seg(struct gen_sc *, double, double, double, double); @@ -104,59 +112,101 @@ void print_hfsc_sc(const char *, u_int, u_int, u_int, void print_fairq_sc(const char *, u_int, u_int, u_int, const struct node_fairq_sc *); +static __attribute__((constructor)) void +pfctl_altq_init(void) +{ + /* + * As hdestroy() will never be called on these tables, it will be + * safe to use references into the stored data as keys. 
+ */ + if (hcreate_r(0, &queue_map) == 0) + err(1, "Failed to create altq queue map"); + if (hcreate_r(0, &if_map) == 0) + err(1, "Failed to create altq interface map"); + if (hcreate_r(0, &qid_map) == 0) + err(1, "Failed to create altq queue id map"); +} + void pfaltq_store(struct pf_altq *a) { - struct pf_altq *altq; - + struct pfctl_altq *altq; + ENTRY item; + ENTRY *ret_item; + size_t key_size; + if ((altq = malloc(sizeof(*altq))) == NULL) - err(1, "malloc"); - memcpy(altq, a, sizeof(struct pf_altq)); - TAILQ_INSERT_TAIL(&altqs, altq, entries); -} + err(1, "queue malloc"); + memcpy(&altq->pa, a, sizeof(struct pf_altq)); + memset(&altq->meta, 0, sizeof(altq->meta)); -struct pf_altq * -pfaltq_lookup(const char *ifname) -{ - struct pf_altq *altq; + if (a->qname[0] == 0) { + item.key = altq->pa.ifname; + item.data = altq; + if (hsearch_r(item, ENTER, &ret_item, &if_map) == 0) + err(1, "interface map insert"); + STAILQ_INSERT_TAIL(&interfaces, altq, meta.link); + } else { + key_size = sizeof(a->ifname) + sizeof(a->qname); + if ((item.key = malloc(key_size)) == NULL) + err(1, "queue map key malloc"); + snprintf(item.key, key_size, "%s:%s", a->ifname, a->qname); + item.data = altq; + if (hsearch_r(item, ENTER, &ret_item, &queue_map) == 0) + err(1, "queue map insert"); - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 && - altq->qname[0] == 0) - return (altq); + item.key = altq->pa.qname; + item.data = &altq->pa.qid; + if (hsearch_r(item, ENTER, &ret_item, &qid_map) == 0) + err(1, "qid map insert"); } - return (NULL); } -struct pf_altq * +static struct pfctl_altq * +pfaltq_lookup(char *ifname) +{ + ENTRY item; + ENTRY *ret_item; + + item.key = ifname; + if (hsearch_r(item, FIND, &ret_item, &if_map) == 0) + return (NULL); + + return (ret_item->data); +} + +static struct pfctl_altq * qname_to_pfaltq(const char *qname, const char *ifname) { - struct pf_altq *altq; + ENTRY item; + ENTRY *ret_item; + char key[IFNAMSIZ + PF_QNAME_SIZE]; - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 && - strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0) - return (altq); - } - return (NULL); + item.key = key; + snprintf(item.key, sizeof(key), "%s:%s", ifname, qname); + if (hsearch_r(item, FIND, &ret_item, &queue_map) == 0) + return (NULL); + + return (ret_item->data); } -u_int32_t -qname_to_qid(const char *qname) +static u_int32_t +qname_to_qid(char *qname) { - struct pf_altq *altq; - + ENTRY item; + ENTRY *ret_item; + uint32_t qid; + /* * We guarantee that same named queues on different interfaces - * have the same qid, so we do NOT need to limit matching on - * one interface! + * have the same qid. */ + item.key = qname; + if (hsearch_r(item, FIND, &ret_item, &qid_map) == 0) + return (0); - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0) - return (altq->qid); - } - return (0); + qid = *(uint32_t *)ret_item->data; + return (qid); } void @@ -315,28 +365,26 @@ eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw, int check_commit_altq(int dev, int opts) { - struct pf_altq *altq; - int error = 0; + struct pfctl_altq *if_ppa; + int error = 0; /* call the discipline check for each interface. 
*/ - TAILQ_FOREACH(altq, &altqs, entries) { - if (altq->qname[0] == 0) { - switch (altq->scheduler) { - case ALTQT_CBQ: - error = check_commit_cbq(dev, opts, altq); - break; - case ALTQT_PRIQ: - error = check_commit_priq(dev, opts, altq); - break; - case ALTQT_HFSC: - error = check_commit_hfsc(dev, opts, altq); - break; - case ALTQT_FAIRQ: - error = check_commit_fairq(dev, opts, altq); - break; - default: - break; - } + STAILQ_FOREACH(if_ppa, &interfaces, meta.link) { + switch (if_ppa->pa.scheduler) { + case ALTQT_CBQ: + error = check_commit_cbq(dev, opts, if_ppa); + break; + case ALTQT_PRIQ: + error = check_commit_priq(dev, opts, if_ppa); + break; + case ALTQT_HFSC: + error = check_commit_hfsc(dev, opts, if_ppa); + break; + case ALTQT_FAIRQ: + error = check_commit_fairq(dev, opts, if_ppa); + break; + default: + break; } } return (error); @@ -350,17 +398,16 @@ eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw, struct node_queue_opt *opts) { /* should be merged with expand_queue */ - struct pf_altq *if_pa, *parent, *altq; - u_int64_t bwsum; - int error = 0; + struct pfctl_altq *if_ppa, *parent; + int error = 0; /* find the corresponding interface and copy fields used by queues */ - if ((if_pa = pfaltq_lookup(pa->ifname)) == NULL) { + if ((if_ppa = pfaltq_lookup(pa->ifname)) == NULL) { fprintf(stderr, "altq not defined on %s\n", pa->ifname); return (1); } - pa->scheduler = if_pa->scheduler; - pa->ifbandwidth = if_pa->ifbandwidth; + pa->scheduler = if_ppa->pa.scheduler; + pa->ifbandwidth = if_ppa->pa.ifbandwidth; if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) { fprintf(stderr, "queue %s already exists on interface %s\n", @@ -377,15 +424,31 @@ eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw, pa->parent, pa->qname); return (1); } - pa->parent_qid = parent->qid; + pa->parent_qid = parent->pa.qid; } if (pa->qlimit == 0) pa->qlimit = DEFAULT_QLIMIT; + if (eval_queue_opts(pa, opts, + parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth)) + return (1); + if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC || pa->scheduler == ALTQT_FAIRQ) { pa->bandwidth = eval_bwspec(bw, - parent == NULL ? pa->ifbandwidth : parent->bandwidth); + parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth); + + /* + * For HFSC, if the linkshare service curve m2 parameter is + * set, it overrides the provided queue bandwidth parameter, + * so adjust the queue bandwidth parameter accordingly here + * to avoid false positives in the total child bandwidth + * check below. 
+ */ + if ((pa->scheduler == ALTQT_HFSC) && + (pa->pq_u.hfsc_opts.lssc_m2 != 0)) { + pa->bandwidth = pa->pq_u.hfsc_opts.lssc_m2; + } if (pa->bandwidth > pa->ifbandwidth) { fprintf(stderr, "bandwidth for %s higher than " @@ -394,44 +457,36 @@ eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw, } /* check the sum of the child bandwidth is under parent's */ if (parent != NULL) { - if (pa->bandwidth > parent->bandwidth) { + if (pa->bandwidth > parent->pa.bandwidth) { warnx("bandwidth for %s higher than parent", pa->qname); return (1); } - bwsum = 0; - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(altq->ifname, pa->ifname, - IFNAMSIZ) == 0 && - altq->qname[0] != 0 && - strncmp(altq->parent, pa->parent, - PF_QNAME_SIZE) == 0) - bwsum += altq->bandwidth; - } - bwsum += pa->bandwidth; - if (bwsum > parent->bandwidth) { - warnx("the sum of the child bandwidth higher" - " than parent \"%s\"", parent->qname); + parent->meta.bwsum += pa->bandwidth; + if (parent->meta.bwsum > parent->pa.bandwidth) { + warnx("the sum of the child bandwidth (%" PRIu64 + ") higher than parent \"%s\" (%" PRIu64 ")", + parent->meta.bwsum, parent->pa.qname, + parent->pa.bandwidth); } } } - if (eval_queue_opts(pa, opts, - parent == NULL ? pa->ifbandwidth : parent->bandwidth)) - return (1); - + if (parent != NULL) + parent->meta.children++; + switch (pa->scheduler) { case ALTQT_CBQ: - error = eval_pfqueue_cbq(pf, pa); + error = eval_pfqueue_cbq(pf, pa, if_ppa); break; case ALTQT_PRIQ: - error = eval_pfqueue_priq(pf, pa); + error = eval_pfqueue_priq(pf, pa, if_ppa); break; case ALTQT_HFSC: - error = eval_pfqueue_hfsc(pf, pa); + error = eval_pfqueue_hfsc(pf, pa, if_ppa, parent); break; case ALTQT_FAIRQ: - error = eval_pfqueue_fairq(pf, pa); + error = eval_pfqueue_fairq(pf, pa, if_ppa, parent); break; default: break; @@ -446,7 +501,7 @@ eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw, #define RM_NS_PER_SEC (1000000000) static int -eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa) +eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa) { struct cbq_opts *opts; u_int ifmtu; @@ -476,6 +531,11 @@ eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa) if (pa->parent[0] == 0) opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR); + if (pa->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS) + if_ppa->meta.root_classes++; + if (pa->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS) + if_ppa->meta.default_classes++; + cbq_compute_idletime(pf, pa); return (0); } @@ -568,33 +628,20 @@ cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa) } static int -check_commit_cbq(int dev, int opts, struct pf_altq *pa) +check_commit_cbq(int dev, int opts, struct pfctl_altq *if_ppa) { - struct pf_altq *altq; - int root_class, default_class; - int error = 0; + int error = 0; /* * check if cbq has one root queue and one default queue * for this interface */ - root_class = default_class = 0; - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) - continue; - if (altq->qname[0] == 0) /* this is for interface */ - continue; - if (altq->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS) - root_class++; - if (altq->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS) - default_class++; - } - if (root_class != 1) { - warnx("should have one root queue on %s", pa->ifname); + if (if_ppa->meta.root_classes != 1) { + warnx("should have one root queue on %s", if_ppa->pa.ifname); error++; } - if (default_class != 1) { - warnx("should have one default queue on %s", pa->ifname); + if 
(if_ppa->meta.default_classes != 1) { + warnx("should have one default queue on %s", if_ppa->pa.ifname); error++; } return (error); @@ -641,51 +688,37 @@ print_cbq_opts(const struct pf_altq *a) * PRIQ support functions */ static int -eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa) +eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa) { - struct pf_altq *altq; if (pa->priority >= PRIQ_MAXPRI) { warnx("priority out of range: max %d", PRIQ_MAXPRI - 1); return (-1); } - /* the priority should be unique for the interface */ - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) == 0 && - altq->qname[0] != 0 && altq->priority == pa->priority) { - warnx("%s and %s have the same priority", - altq->qname, pa->qname); - return (-1); - } - } + if (BIT_ISSET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris)) { + warnx("%s does not have a unique priority on interface %s", + pa->qname, pa->ifname); + return (-1); + } else + BIT_SET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris); + if (pa->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS) + if_ppa->meta.default_classes++; return (0); } static int -check_commit_priq(int dev, int opts, struct pf_altq *pa) +check_commit_priq(int dev, int opts, struct pfctl_altq *if_ppa) { - struct pf_altq *altq; - int default_class; - int error = 0; /* * check if priq has one default class for this interface */ - default_class = 0; - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) - continue; - if (altq->qname[0] == 0) /* this is for interface */ - continue; - if (altq->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS) - default_class++; + if (if_ppa->meta.default_classes != 1) { + warnx("should have one default queue on %s", if_ppa->pa.ifname); + return (1); } - if (default_class != 1) { - warnx("should have one default queue on %s", pa->ifname); - error++; - } - return (error); + return (0); } static int @@ -720,15 +753,15 @@ print_priq_opts(const struct pf_altq *a) * HFSC support functions */ static int -eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa) +eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa, + struct pfctl_altq *parent) { - struct pf_altq *altq, *parent; struct hfsc_opts_v1 *opts; struct service_curve sc; opts = &pa->pq_u.hfsc_opts; - if (pa->parent[0] == 0) { + if (parent == NULL) { /* root queue */ opts->lssc_m1 = pa->ifbandwidth; opts->lssc_m2 = pa->ifbandwidth; @@ -736,9 +769,21 @@ eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa) return (0); } - LIST_INIT(&rtsc); - LIST_INIT(&lssc); + /* First child initializes the parent's service curve accumulators. */ + if (parent->meta.children == 1) { + LIST_INIT(&parent->meta.rtsc); + LIST_INIT(&parent->meta.lssc); + } + if (parent->pa.pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) { + warnx("adding %s would make default queue %s not a leaf", + pa->qname, pa->parent); + return (-1); + } + + if (pa->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) + if_ppa->meta.default_classes++; + /* if link_share is not specified, use bandwidth */ if (opts->lssc_m2 == 0) opts->lssc_m2 = pa->bandwidth; @@ -768,51 +813,22 @@ eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa) * be smaller than the interface bandwidth, and the upper-limit should * be larger than the real-time service curve when both are defined. 
*/ - parent = qname_to_pfaltq(pa->parent, pa->ifname); - if (parent == NULL) - errx(1, "parent %s not found for %s", pa->parent, pa->qname); - - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) - continue; - if (altq->qname[0] == 0) /* this is for interface */ - continue; - - /* if the class has a real-time service curve, add it. */ - if (opts->rtsc_m2 != 0 && altq->pq_u.hfsc_opts.rtsc_m2 != 0) { - sc.m1 = altq->pq_u.hfsc_opts.rtsc_m1; - sc.d = altq->pq_u.hfsc_opts.rtsc_d; - sc.m2 = altq->pq_u.hfsc_opts.rtsc_m2; - gsc_add_sc(&rtsc, &sc); - } - - if (strncmp(altq->parent, pa->parent, PF_QNAME_SIZE) != 0) - continue; - - /* if the class has a linkshare service curve, add it. */ - if (opts->lssc_m2 != 0 && altq->pq_u.hfsc_opts.lssc_m2 != 0) { - sc.m1 = altq->pq_u.hfsc_opts.lssc_m1; - sc.d = altq->pq_u.hfsc_opts.lssc_d; - sc.m2 = altq->pq_u.hfsc_opts.lssc_m2; - gsc_add_sc(&lssc, &sc); - } - } - + /* check the real-time service curve. reserve 20% of interface bw */ if (opts->rtsc_m2 != 0) { /* add this queue to the sum */ sc.m1 = opts->rtsc_m1; sc.d = opts->rtsc_d; sc.m2 = opts->rtsc_m2; - gsc_add_sc(&rtsc, &sc); + gsc_add_sc(&parent->meta.rtsc, &sc); /* compare the sum with 80% of the interface */ sc.m1 = 0; sc.d = 0; sc.m2 = pa->ifbandwidth / 100 * 80; - if (!is_gsc_under_sc(&rtsc, &sc)) { + if (!is_gsc_under_sc(&parent->meta.rtsc, &sc)) { warnx("real-time sc exceeds 80%% of the interface " "bandwidth (%s)", rate2str((double)sc.m2)); - goto err_ret; + return (-1); } } @@ -822,14 +838,14 @@ eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa) sc.m1 = opts->lssc_m1; sc.d = opts->lssc_d; sc.m2 = opts->lssc_m2; - gsc_add_sc(&lssc, &sc); + gsc_add_sc(&parent->meta.lssc, &sc); /* compare the sum of the children with parent's sc */ - sc.m1 = parent->pq_u.hfsc_opts.lssc_m1; - sc.d = parent->pq_u.hfsc_opts.lssc_d; - sc.m2 = parent->pq_u.hfsc_opts.lssc_m2; - if (!is_gsc_under_sc(&lssc, &sc)) { + sc.m1 = parent->pa.pq_u.hfsc_opts.lssc_m1; + sc.d = parent->pa.pq_u.hfsc_opts.lssc_d; + sc.m2 = parent->pa.pq_u.hfsc_opts.lssc_m2; + if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) { warnx("linkshare sc exceeds parent's sc"); - goto err_ret; + return (-1); } } @@ -838,38 +854,30 @@ eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa) if (opts->ulsc_m1 > pa->ifbandwidth || opts->ulsc_m2 > pa->ifbandwidth) { warnx("upper-limit larger than interface bandwidth"); - goto err_ret; + return (-1); } if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) { warnx("upper-limit sc smaller than real-time sc"); - goto err_ret; + return (-1); } } - gsc_destroy(&rtsc); - gsc_destroy(&lssc); - return (0); - -err_ret: - gsc_destroy(&rtsc); - gsc_destroy(&lssc); - return (-1); } /* * FAIRQ support functions */ static int -eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa) +eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa, + struct pfctl_altq *if_ppa, struct pfctl_altq *parent) { - struct pf_altq *altq, *parent; struct fairq_opts *opts; struct service_curve sc; opts = &pa->pq_u.fairq_opts; - if (pa->parent[0] == 0) { + if (pa->parent == NULL) { /* root queue */ opts->lssc_m1 = pa->ifbandwidth; opts->lssc_m2 = pa->ifbandwidth; @@ -877,7 +885,18 @@ eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa) return (0); } - LIST_INIT(&lssc); + /* First child initializes the parent's service curve accumulator. 
*/ + if (parent->meta.children == 1) + LIST_INIT(&parent->meta.lssc); + + if (parent->pa.pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) { + warnx("adding %s would make default queue %s not a leaf", + pa->qname, pa->parent); + return (-1); + } + + if (pa->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) + if_ppa->meta.default_classes++; /* if link_share is not specified, use bandwidth */ if (opts->lssc_m2 == 0) @@ -894,122 +913,49 @@ eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa) * be smaller than the interface bandwidth, and the upper-limit should * be larger than the real-time service curve when both are defined. */ - parent = qname_to_pfaltq(pa->parent, pa->ifname); - if (parent == NULL) - errx(1, "parent %s not found for %s", pa->parent, pa->qname); - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) - continue; - if (altq->qname[0] == 0) /* this is for interface */ - continue; - - if (strncmp(altq->parent, pa->parent, PF_QNAME_SIZE) != 0) - continue; - - /* if the class has a link-sharing service curve, add it. */ - if (opts->lssc_m2 != 0 && altq->pq_u.fairq_opts.lssc_m2 != 0) { - sc.m1 = altq->pq_u.fairq_opts.lssc_m1; - sc.d = altq->pq_u.fairq_opts.lssc_d; - sc.m2 = altq->pq_u.fairq_opts.lssc_m2; - gsc_add_sc(&lssc, &sc); - } - } - - /* check the link-sharing service curve. */ + /* check the linkshare service curve. */ if (opts->lssc_m2 != 0) { - sc.m1 = parent->pq_u.fairq_opts.lssc_m1; - sc.d = parent->pq_u.fairq_opts.lssc_d; - sc.m2 = parent->pq_u.fairq_opts.lssc_m2; - if (!is_gsc_under_sc(&lssc, &sc)) { + /* add this queue to the child sum */ + sc.m1 = opts->lssc_m1; + sc.d = opts->lssc_d; + sc.m2 = opts->lssc_m2; + gsc_add_sc(&parent->meta.lssc, &sc); + /* compare the sum of the children with parent's sc */ + sc.m1 = parent->pa.pq_u.fairq_opts.lssc_m1; + sc.d = parent->pa.pq_u.fairq_opts.lssc_d; + sc.m2 = parent->pa.pq_u.fairq_opts.lssc_m2; + if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) { warnx("link-sharing sc exceeds parent's sc"); - goto err_ret; + return (-1); } } - gsc_destroy(&lssc); - return (0); - -err_ret: - gsc_destroy(&lssc); - return (-1); } static int -check_commit_hfsc(int dev, int opts, struct pf_altq *pa) +check_commit_hfsc(int dev, int opts, struct pfctl_altq *if_ppa) { - struct pf_altq *altq, *def = NULL; - int default_class; - int error = 0; /* check if hfsc has one default queue for this interface */ - default_class = 0; - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) - continue; - if (altq->qname[0] == 0) /* this is for interface */ - continue; - if (altq->parent[0] == 0) /* dummy root */ - continue; - if (altq->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) { - default_class++; - def = altq; - } - } - if (default_class != 1) { - warnx("should have one default queue on %s", pa->ifname); + if (if_ppa->meta.default_classes != 1) { + warnx("should have one default queue on %s", if_ppa->pa.ifname); return (1); } - /* make sure the default queue is a leaf */ - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) - continue; - if (altq->qname[0] == 0) /* this is for interface */ - continue; - if (strncmp(altq->parent, def->qname, PF_QNAME_SIZE) == 0) { - warnx("default queue is not a leaf"); - error++; - } - } - return (error); + return (0); } static int -check_commit_fairq(int dev __unused, int opts __unused, struct pf_altq *pa) +check_commit_fairq(int dev __unused, int opts __unused, struct pfctl_altq *if_ppa) { - struct 
pf_altq *altq, *def = NULL; - int default_class; - int error = 0; /* check if fairq has one default queue for this interface */ - default_class = 0; - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) - continue; - if (altq->qname[0] == 0) /* this is for interface */ - continue; - if (altq->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) { - default_class++; - def = altq; - } - } - if (default_class != 1) { - warnx("should have one default queue on %s", pa->ifname); + if (if_ppa->meta.default_classes != 1) { + warnx("should have one default queue on %s", if_ppa->pa.ifname); return (1); } - /* make sure the default queue is a leaf */ - TAILQ_FOREACH(altq, &altqs, entries) { - if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) - continue; - if (altq->qname[0] == 0) /* this is for interface */ - continue; - if (strncmp(altq->parent, def->qname, PF_QNAME_SIZE) == 0) { - warnx("default queue is not a leaf"); - error++; - } - } - return (error); + return (0); } static int @@ -1182,17 +1128,6 @@ is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc) return (1); } -static void -gsc_destroy(struct gen_sc *gsc) -{ - struct segment *s; - - while ((s = LIST_FIRST(gsc)) != NULL) { - LIST_REMOVE(s, _next); - free(s); - } -} - /* * return a segment entry starting at x. * if gsc has no entry starting at x, a new entry is created at x. @@ -1351,8 +1286,7 @@ getifspeed(char *ifname) struct ifreq ifr; struct if_data ifrdat; - if ((s = socket(get_socket_domain(), SOCK_DGRAM, 0)) < 0) - err(1, "socket"); + s = get_query_socket(); bzero(&ifr, sizeof(ifr)); if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >= sizeof(ifr.ifr_name)) @@ -1360,8 +1294,6 @@ getifspeed(char *ifname) ifr.ifr_data = (caddr_t)&ifrdat; if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1) err(1, "SIOCGIFDATA"); - if (close(s)) - err(1, "close"); return ((u_int32_t)ifrdat.ifi_baudrate); } #endif @@ -1372,8 +1304,7 @@ getifmtu(char *ifname) int s; struct ifreq ifr; - if ((s = socket(get_socket_domain(), SOCK_DGRAM, 0)) < 0) - err(1, "socket"); + s = get_query_socket(); bzero(&ifr, sizeof(ifr)); if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >= sizeof(ifr.ifr_name)) @@ -1384,8 +1315,6 @@ getifmtu(char *ifname) #else err(1, "SIOCGIFMTU"); #endif - if (close(s)) - err(1, "close"); if (ifr.ifr_mtu > 0) return (ifr.ifr_mtu); else { diff --git a/sbin/pfctl/pfctl_parser.c b/sbin/pfctl/pfctl_parser.c index 80195b1f5afb..c986cf15e498 100644 --- a/sbin/pfctl/pfctl_parser.c +++ b/sbin/pfctl/pfctl_parser.c @@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include @@ -72,7 +73,6 @@ void print_fromto(struct pf_rule_addr *, pf_osfp_t, struct pf_rule_addr *, u_int8_t, u_int8_t, int, int); int ifa_skip_if(const char *filter, struct node_host *p); -struct node_host *ifa_grouplookup(const char *, int); struct node_host *host_if(const char *, int); struct node_host *host_v4(const char *, int); struct node_host *host_v6(const char *, int); @@ -209,6 +209,19 @@ const struct pf_timeout pf_timeouts[] = { { NULL, 0 } }; +static struct hsearch_data isgroup_map; + +static __attribute__((constructor)) void +pfctl_parser_init(void) +{ + /* + * As hdestroy() will never be called on these tables, it will be + * safe to use references into the stored data as keys. 
+ */ + if (hcreate_r(0, &isgroup_map) == 0) + err(1, "Failed to create interface group query response map"); +} + const struct icmptypeent * geticmptypebynumber(u_int8_t type, sa_family_t af) { @@ -1153,6 +1166,71 @@ check_netmask(struct node_host *h, sa_family_t af) static struct node_host *iftab; +/* + * Retrieve the list of groups this interface is a member of and make sure + * each group is in the group map. + */ +static void +ifa_add_groups_to_map(char *ifa_name) +{ + int s, len; + struct ifgroupreq ifgr; + struct ifg_req *ifg; + + s = get_query_socket(); + + /* Get size of group list for this interface */ + memset(&ifgr, 0, sizeof(ifgr)); + strlcpy(ifgr.ifgr_name, ifa_name, IFNAMSIZ); + if (ioctl(s, SIOCGIFGROUP, (caddr_t)&ifgr) == -1) + err(1, "SIOCGIFGROUP"); + + /* Retrieve group list for this interface */ + len = ifgr.ifgr_len; + ifgr.ifgr_groups = + (struct ifg_req *)calloc(len / sizeof(struct ifg_req), + sizeof(struct ifg_req)); + if (ifgr.ifgr_groups == NULL) + err(1, "calloc"); + if (ioctl(s, SIOCGIFGROUP, (caddr_t)&ifgr) == -1) + err(1, "SIOCGIFGROUP"); + + ifg = ifgr.ifgr_groups; + for (; ifg && len >= sizeof(struct ifg_req); ifg++) { + len -= sizeof(struct ifg_req); + if (strcmp(ifg->ifgrq_group, "all")) { + ENTRY item; + ENTRY *ret_item; + int *answer; + + item.key = ifg->ifgrq_group; + if (hsearch_r(item, FIND, &ret_item, &isgroup_map) == 0) { + struct ifgroupreq ifgr2; + + /* Don't know the answer yet */ + if ((answer = malloc(sizeof(int))) == NULL) + err(1, "malloc"); + + bzero(&ifgr2, sizeof(ifgr2)); + strlcpy(ifgr2.ifgr_name, ifg->ifgrq_group, + sizeof(ifgr2.ifgr_name)); + if (ioctl(s, SIOCGIFGMEMB, (caddr_t)&ifgr2) == 0) + *answer = ifgr2.ifgr_len; + else + *answer = 0; + + item.key = strdup(ifg->ifgrq_group); + item.data = answer; + if (hsearch_r(item, ENTER, &ret_item, + &isgroup_map) == 0) + err(1, "interface group query response" + " map insert"); + } + } + } + free(ifgr.ifgr_groups); +} + void ifa_load(void) { @@ -1220,6 +1298,8 @@ ifa_load(void) sizeof(struct in6_addr)); n->ifindex = ((struct sockaddr_in6 *) ifa->ifa_addr)->sin6_scope_id; + } else if (n->af == AF_LINK) { + ifa_add_groups_to_map(ifa->ifa_name); } if ((n->ifname = strdup(ifa->ifa_name)) == NULL) err(1, "ifa_load: strdup"); @@ -1237,7 +1317,7 @@ ifa_load(void) freeifaddrs(ifap); } -int +static int get_socket_domain(void) { int sdom; @@ -1257,31 +1337,54 @@ get_socket_domain(void) return (sdom); } +int +get_query_socket(void) +{ + static int s = -1; + + if (s == -1) { + if ((s = socket(get_socket_domain(), SOCK_DGRAM, 0)) == -1) + err(1, "socket"); + } + + return (s); +} + +/* + * Returns the response len if the name is a group, otherwise returns 0. 
+ */ +static int +is_a_group(char *name) +{ + ENTRY item; + ENTRY *ret_item; + + item.key = name; + if (hsearch_r(item, FIND, &ret_item, &isgroup_map) == 0) + return (0); + + return (*(int *)ret_item->data); +} + struct node_host * -ifa_exists(const char *ifa_name) +ifa_exists(char *ifa_name) { struct node_host *n; - struct ifgroupreq ifgr; int s; if (iftab == NULL) ifa_load(); - /* check wether this is a group */ - if ((s = socket(get_socket_domain(), SOCK_DGRAM, 0)) == -1) - err(1, "socket"); - bzero(&ifgr, sizeof(ifgr)); - strlcpy(ifgr.ifgr_name, ifa_name, sizeof(ifgr.ifgr_name)); - if (ioctl(s, SIOCGIFGMEMB, (caddr_t)&ifgr) == 0) { + /* check whether this is a group */ + s = get_query_socket(); + if (is_a_group(ifa_name)) { /* fake a node_host */ if ((n = calloc(1, sizeof(*n))) == NULL) err(1, "calloc"); if ((n->ifname = strdup(ifa_name)) == NULL) err(1, "strdup"); - close(s); return (n); } - close(s); for (n = iftab; n; n = n->next) { if (n->af == AF_LINK && !strncmp(n->ifname, ifa_name, IFNAMSIZ)) @@ -1292,23 +1395,19 @@ ifa_exists(const char *ifa_name) } struct node_host * -ifa_grouplookup(const char *ifa_name, int flags) +ifa_grouplookup(char *ifa_name, int flags) { struct ifg_req *ifg; struct ifgroupreq ifgr; int s, len; struct node_host *n, *h = NULL; - if ((s = socket(get_socket_domain(), SOCK_DGRAM, 0)) == -1) - err(1, "socket"); + s = get_query_socket(); + len = is_a_group(ifa_name); + if (len == 0) + return (NULL); bzero(&ifgr, sizeof(ifgr)); strlcpy(ifgr.ifgr_name, ifa_name, sizeof(ifgr.ifgr_name)); - if (ioctl(s, SIOCGIFGMEMB, (caddr_t)&ifgr) == -1) { - close(s); - return (NULL); - } - - len = ifgr.ifgr_len; if ((ifgr.ifgr_groups = calloc(1, len)) == NULL) err(1, "calloc"); if (ioctl(s, SIOCGIFGMEMB, (caddr_t)&ifgr) == -1) @@ -1327,13 +1426,12 @@ ifa_grouplookup(const char *ifa_name, int flags) } } free(ifgr.ifgr_groups); - close(s); return (h); } struct node_host * -ifa_lookup(const char *ifa_name, int flags) +ifa_lookup(char *ifa_name, int flags) { struct node_host *p = NULL, *h = NULL, *n = NULL; int got4 = 0, got6 = 0; diff --git a/sbin/pfctl/pfctl_parser.h b/sbin/pfctl/pfctl_parser.h index 05ebdf29f6fc..aa6d98d7cf91 100644 --- a/sbin/pfctl/pfctl_parser.h +++ b/sbin/pfctl/pfctl_parser.h @@ -177,6 +177,24 @@ struct node_queue_opt { } data; }; +#define QPRI_BITSET_SIZE 256 +BITSET_DEFINE(qpri_bitset, QPRI_BITSET_SIZE); +LIST_HEAD(gen_sc, segment); + +struct pfctl_altq { + struct pf_altq pa; + struct { + STAILQ_ENTRY(pfctl_altq) link; + u_int64_t bwsum; + struct qpri_bitset qpris; + int children; + int root_classes; + int default_classes; + struct gen_sc lssc; + struct gen_sc rtsc; + } meta; +}; + #ifdef __FreeBSD__ /* * XXX @@ -313,10 +331,10 @@ void set_ipmask(struct node_host *, u_int8_t); int check_netmask(struct node_host *, sa_family_t); int unmask(struct pf_addr *, sa_family_t); void ifa_load(void); -int get_socket_domain(void); -struct node_host *ifa_exists(const char *); -struct node_host *ifa_grouplookup(const char *ifa_name, int flags); -struct node_host *ifa_lookup(const char *, int); +int get_query_socket(void); +struct node_host *ifa_exists(char *); +struct node_host *ifa_grouplookup(char *ifa_name, int flags); +struct node_host *ifa_lookup(char *, int); struct node_host *host(const char *); int append_addr(struct pfr_buffer *, char *, int); diff --git a/share/skel/dot.cshrc b/share/skel/dot.cshrc index 47093f7b49ca..e923e4676742 100644 --- a/share/skel/dot.cshrc +++ b/share/skel/dot.cshrc @@ -15,7 +15,6 @@ alias ll ls -lAF # These are normally set through 
/etc/login.conf. You may override them here # if wanted. # set path = (/sbin /bin /usr/sbin /usr/bin /usr/local/sbin /usr/local/bin $HOME/bin) -# setenv BLOCKSIZE K # A righteous umask # umask 22 diff --git a/share/skel/dot.profile b/share/skel/dot.profile index 646a55b96b4b..02623293b592 100644 --- a/share/skel/dot.profile +++ b/share/skel/dot.profile @@ -8,7 +8,6 @@ # These are normally set through /etc/login.conf. You may override them here # if wanted. # PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:$HOME/bin; export PATH -# BLOCKSIZE=K; export BLOCKSIZE # Setting TERM is normally done through /etc/ttys. Do only override # if you're sure that you'll never log in via telnet or xterm or a diff --git a/stand/mips/beri/boot2/boot2.c b/stand/mips/beri/boot2/boot2.c index a875ff743ae1..f771da856e4c 100644 --- a/stand/mips/beri/boot2/boot2.c +++ b/stand/mips/beri/boot2/boot2.c @@ -651,3 +651,19 @@ xgetc(int fn) return 0; } } + +int +getchar(void) +{ + + return xgetc(0); +} + +void +exit(int code) +{ + + printf("error: loader exit\n"); + while (1); + __unreachable(); +} diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC index 57fa3067c2ea..1053166125b7 100644 --- a/sys/amd64/conf/GENERIC +++ b/sys/amd64/conf/GENERIC @@ -284,7 +284,6 @@ device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'') # Wireless NIC cards device wlan # 802.11 support options IEEE80211_DEBUG # enable debug msgs -options IEEE80211_AMPDU_AGE # age frames in AMPDU reorder q's options IEEE80211_SUPPORT_MESH # enable 802.11s draft support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support diff --git a/sys/amd64/linux/linux_machdep.c b/sys/amd64/linux/linux_machdep.c index 434ea0eac07c..a931db6f4d2b 100644 --- a/sys/amd64/linux/linux_machdep.c +++ b/sys/amd64/linux/linux_machdep.c @@ -201,6 +201,7 @@ linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap) l_stack_t lss; int error; + memset(&lss, 0, sizeof(lss)); LINUX_CTR2(sigaltstack, "%p, %p", uap->uss, uap->uoss); if (uap->uss != NULL) { diff --git a/sys/arm/allwinner/if_awg.c b/sys/arm/allwinner/if_awg.c index 7ad974239156..0e97cf826c89 100644 --- a/sys/arm/allwinner/if_awg.c +++ b/sys/arm/allwinner/if_awg.c @@ -1466,6 +1466,12 @@ awg_setup_extres(device_t dev) goto fail; } if (rst_ephy != NULL) { + /* + * The ephy reset is left de-asserted by U-Boot. Assert it + * here to make sure that we're in a known good state going + * into the PHY reset. 
+ */ + hwreset_assert(rst_ephy); error = hwreset_deassert(rst_ephy); if (error != 0) { device_printf(dev, "cannot de-assert ephy reset\n"); diff --git a/sys/arm/conf/RK3188 b/sys/arm/conf/RK3188 index 8ffe581ee1d0..440041a023ca 100644 --- a/sys/arm/conf/RK3188 +++ b/sys/arm/conf/RK3188 @@ -79,7 +79,6 @@ device bpf # Wireless NIC cards options IEEE80211_DEBUG -options IEEE80211_AMPDU_AGE options IEEE80211_SUPPORT_MESH options IEEE80211_SUPPORT_TDMA device wlan # 802.11 support diff --git a/sys/compat/linux/linux_ioctl.c b/sys/compat/linux/linux_ioctl.c index 0a8e5087984b..adcd9d137758 100644 --- a/sys/compat/linux/linux_ioctl.c +++ b/sys/compat/linux/linux_ioctl.c @@ -686,6 +686,7 @@ bsd_to_linux_termio(struct termios *bios, struct linux_termio *lio) { struct linux_termios lios; + memset(lio, 0, sizeof(*lio)); bsd_to_linux_termios(bios, &lios); lio->c_iflag = lios.c_iflag; lio->c_oflag = lios.c_oflag; @@ -2843,6 +2844,8 @@ linux_to_bsd_v4l_window(struct l_video_window *lvw, struct video_window *vw) static int bsd_to_linux_v4l_window(struct video_window *vw, struct l_video_window *lvw) { + memset(lvw, 0, sizeof(*lvw)); + lvw->x = vw->x; lvw->y = vw->y; lvw->width = vw->width; diff --git a/sys/compat/linux/linux_misc.c b/sys/compat/linux/linux_misc.c index 5dcc8c64788e..f888a2640d1f 100644 --- a/sys/compat/linux/linux_misc.c +++ b/sys/compat/linux/linux_misc.c @@ -1089,9 +1089,8 @@ linux_waitid(struct thread *td, struct linux_waitid_args *args) } if (args->info != NULL) { p = td->td_proc; - if (td->td_retval[0] == 0) - bzero(&lsi, sizeof(lsi)); - else { + bzero(&lsi, sizeof(lsi)); + if (td->td_retval[0] != 0) { sig = bsd_to_linux_signal(siginfo.si_signo); siginfo_to_lsiginfo(&siginfo, &lsi, sig); } diff --git a/sys/compat/ndis/kern_windrv.c b/sys/compat/ndis/kern_windrv.c index 597b8362b37e..fe60b0e40767 100644 --- a/sys/compat/ndis/kern_windrv.c +++ b/sys/compat/ndis/kern_windrv.c @@ -58,6 +58,10 @@ __FBSDID("$FreeBSD$"); #include #endif +#ifdef __amd64__ +#include +#endif + #include #include @@ -68,6 +72,19 @@ __FBSDID("$FreeBSD$"); #include #include +#ifdef __amd64__ +struct fpu_cc_ent { + struct fpu_kern_ctx *ctx; + LIST_ENTRY(fpu_cc_ent) entries; +}; +static LIST_HEAD(fpu_ctx_free, fpu_cc_ent) fpu_free_head = + LIST_HEAD_INITIALIZER(fpu_free_head); +static LIST_HEAD(fpu_ctx_busy, fpu_cc_ent) fpu_busy_head = + LIST_HEAD_INITIALIZER(fpu_busy_head); +static struct mtx fpu_free_mtx; +static struct mtx fpu_busy_mtx; +#endif + static struct mtx drvdb_mtx; static STAILQ_HEAD(drvdb, drvdb_ent) drvdb_head; @@ -98,6 +115,13 @@ windrv_libinit(void) mtx_init(&drvdb_mtx, "Windows driver DB lock", "Windows internal lock", MTX_DEF); +#ifdef __amd64__ + LIST_INIT(&fpu_free_head); + LIST_INIT(&fpu_busy_head); + mtx_init(&fpu_free_mtx, "free fpu context list lock", NULL, MTX_DEF); + mtx_init(&fpu_busy_mtx, "busy fpu context list lock", NULL, MTX_DEF); +#endif + /* * PCI and pccard devices don't need to use IRPs to * interact with their bus drivers (usually), so our @@ -132,6 +156,9 @@ int windrv_libfini(void) { struct drvdb_ent *d; +#ifdef __amd64__ + struct fpu_cc_ent *ent; +#endif mtx_lock(&drvdb_mtx); while(STAILQ_FIRST(&drvdb_head) != NULL) { @@ -149,6 +176,18 @@ windrv_libfini(void) #ifdef __i386__ smp_rendezvous(NULL, x86_oldldt, NULL, NULL); ExFreePool(my_tids); +#endif +#ifdef __amd64__ + while ((ent = LIST_FIRST(&fpu_free_head)) != NULL) { + LIST_REMOVE(ent, entries); + fpu_kern_free_ctx(ent->ctx); + free(ent, M_DEVBUF); + } + mtx_destroy(&fpu_free_mtx); + + ent = LIST_FIRST(&fpu_busy_head); + 
KASSERT(ent == NULL, ("busy fpu context list is not empty")); + mtx_destroy(&fpu_busy_mtx); #endif return (0); } @@ -615,6 +654,148 @@ windrv_wrap(func, wrap, argcnt, ftype) return (0); } + +static struct fpu_cc_ent * +request_fpu_cc_ent(void) +{ + struct fpu_cc_ent *ent; + + mtx_lock(&fpu_free_mtx); + if ((ent = LIST_FIRST(&fpu_free_head)) != NULL) { + LIST_REMOVE(ent, entries); + mtx_unlock(&fpu_free_mtx); + mtx_lock(&fpu_busy_mtx); + LIST_INSERT_HEAD(&fpu_busy_head, ent, entries); + mtx_unlock(&fpu_busy_mtx); + return (ent); + } + mtx_unlock(&fpu_free_mtx); + + if ((ent = malloc(sizeof(struct fpu_cc_ent), M_DEVBUF, M_NOWAIT | + M_ZERO)) != NULL) { + ent->ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL | + FPU_KERN_NOWAIT); + if (ent->ctx != NULL) { + mtx_lock(&fpu_busy_mtx); + LIST_INSERT_HEAD(&fpu_busy_head, ent, entries); + mtx_unlock(&fpu_busy_mtx); + } else { + free(ent, M_DEVBUF); + ent = NULL; + } + } + + return (ent); +} + +static void +release_fpu_cc_ent(struct fpu_cc_ent *ent) +{ + mtx_lock(&fpu_busy_mtx); + LIST_REMOVE(ent, entries); + mtx_unlock(&fpu_busy_mtx); + mtx_lock(&fpu_free_mtx); + LIST_INSERT_HEAD(&fpu_free_head, ent, entries); + mtx_unlock(&fpu_free_mtx); +} + +uint64_t +_x86_64_call1(void *fn, uint64_t a) +{ + struct fpu_cc_ent *ent; + uint64_t ret; + + if ((ent = request_fpu_cc_ent()) == NULL) + return (ENOMEM); + fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL); + ret = x86_64_call1(fn, a); + fpu_kern_leave(curthread, ent->ctx); + release_fpu_cc_ent(ent); + + return (ret); +} + +uint64_t +_x86_64_call2(void *fn, uint64_t a, uint64_t b) +{ + struct fpu_cc_ent *ent; + uint64_t ret; + + if ((ent = request_fpu_cc_ent()) == NULL) + return (ENOMEM); + fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL); + ret = x86_64_call2(fn, a, b); + fpu_kern_leave(curthread, ent->ctx); + release_fpu_cc_ent(ent); + + return (ret); +} + +uint64_t +_x86_64_call3(void *fn, uint64_t a, uint64_t b, uint64_t c) +{ + struct fpu_cc_ent *ent; + uint64_t ret; + + if ((ent = request_fpu_cc_ent()) == NULL) + return (ENOMEM); + fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL); + ret = x86_64_call3(fn, a, b, c); + fpu_kern_leave(curthread, ent->ctx); + release_fpu_cc_ent(ent); + + return (ret); +} + +uint64_t +_x86_64_call4(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d) +{ + struct fpu_cc_ent *ent; + uint64_t ret; + + if ((ent = request_fpu_cc_ent()) == NULL) + return (ENOMEM); + fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL); + ret = x86_64_call4(fn, a, b, c, d); + fpu_kern_leave(curthread, ent->ctx); + release_fpu_cc_ent(ent); + + return (ret); +} + +uint64_t +_x86_64_call5(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d, + uint64_t e) +{ + struct fpu_cc_ent *ent; + uint64_t ret; + + if ((ent = request_fpu_cc_ent()) == NULL) + return (ENOMEM); + fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL); + ret = x86_64_call5(fn, a, b, c, d, e); + fpu_kern_leave(curthread, ent->ctx); + release_fpu_cc_ent(ent); + + return (ret); +} + +uint64_t +_x86_64_call6(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d, + uint64_t e, uint64_t f) +{ + struct fpu_cc_ent *ent; + uint64_t ret; + + if ((ent = request_fpu_cc_ent()) == NULL) + return (ENOMEM); + fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL); + ret = x86_64_call6(fn, a, b, c, d, e, f); + fpu_kern_leave(curthread, ent->ctx); + release_fpu_cc_ent(ent); + + return (ret); +} #endif /* __amd64__ */ diff --git a/sys/compat/ndis/pe_var.h b/sys/compat/ndis/pe_var.h index 2e5758c592e1..8a56a4c5bafd 100644 --- 
a/sys/compat/ndis/pe_var.h +++ b/sys/compat/ndis/pe_var.h @@ -460,22 +460,30 @@ extern uint64_t x86_64_call5(void *, uint64_t, uint64_t, uint64_t, uint64_t, extern uint64_t x86_64_call6(void *, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); +uint64_t _x86_64_call1(void *, uint64_t); +uint64_t _x86_64_call2(void *, uint64_t, uint64_t); +uint64_t _x86_64_call3(void *, uint64_t, uint64_t, uint64_t); +uint64_t _x86_64_call4(void *, uint64_t, uint64_t, uint64_t, uint64_t); +uint64_t _x86_64_call5(void *, uint64_t, uint64_t, uint64_t, uint64_t, + uint64_t); +uint64_t _x86_64_call6(void *, uint64_t, uint64_t, uint64_t, uint64_t, + uint64_t, uint64_t); #define MSCALL1(fn, a) \ - x86_64_call1((fn), (uint64_t)(a)) + _x86_64_call1((fn), (uint64_t)(a)) #define MSCALL2(fn, a, b) \ - x86_64_call2((fn), (uint64_t)(a), (uint64_t)(b)) + _x86_64_call2((fn), (uint64_t)(a), (uint64_t)(b)) #define MSCALL3(fn, a, b, c) \ - x86_64_call3((fn), (uint64_t)(a), (uint64_t)(b), \ + _x86_64_call3((fn), (uint64_t)(a), (uint64_t)(b), \ (uint64_t)(c)) #define MSCALL4(fn, a, b, c, d) \ - x86_64_call4((fn), (uint64_t)(a), (uint64_t)(b), \ + _x86_64_call4((fn), (uint64_t)(a), (uint64_t)(b), \ (uint64_t)(c), (uint64_t)(d)) #define MSCALL5(fn, a, b, c, d, e) \ - x86_64_call5((fn), (uint64_t)(a), (uint64_t)(b), \ + _x86_64_call5((fn), (uint64_t)(a), (uint64_t)(b), \ (uint64_t)(c), (uint64_t)(d), (uint64_t)(e)) #define MSCALL6(fn, a, b, c, d, e, f) \ - x86_64_call6((fn), (uint64_t)(a), (uint64_t)(b), \ + _x86_64_call6((fn), (uint64_t)(a), (uint64_t)(b), \ (uint64_t)(c), (uint64_t)(d), (uint64_t)(e), (uint64_t)(f)) #endif /* __amd64__ */ diff --git a/sys/conf/NOTES b/sys/conf/NOTES index 1b019dbe09ff..e764c16fced0 100644 --- a/sys/conf/NOTES +++ b/sys/conf/NOTES @@ -843,7 +843,6 @@ device vxlan # and ath drivers and will eventually be required by all 802.11 drivers. 
device wlan options IEEE80211_DEBUG #enable debugging msgs -options IEEE80211_AMPDU_AGE #age frames in AMPDU reorder q's options IEEE80211_SUPPORT_MESH #enable 802.11s D3.0 support options IEEE80211_SUPPORT_TDMA #enable TDMA support diff --git a/sys/conf/config.mk b/sys/conf/config.mk index 63c82fb36819..1e40dc3f5923 100644 --- a/sys/conf/config.mk +++ b/sys/conf/config.mk @@ -31,13 +31,12 @@ opt_scsi.h: echo "#define SCSI_DELAY 15000" > ${.TARGET} opt_wlan.h: echo "#define IEEE80211_DEBUG 1" > ${.TARGET} - echo "#define IEEE80211_AMPDU_AGE 1" >> ${.TARGET} echo "#define IEEE80211_SUPPORT_MESH 1" >> ${.TARGET} KERN_OPTS.i386=NEW_PCIB DEV_PCI KERN_OPTS.amd64=NEW_PCIB DEV_PCI KERN_OPTS.powerpc=NEW_PCIB DEV_PCI KERN_OPTS=MROUTING IEEE80211_DEBUG \ - IEEE80211_AMPDU_AGE IEEE80211_SUPPORT_MESH DEV_BPF \ + IEEE80211_SUPPORT_MESH DEV_BPF \ ${KERN_OPTS.${MACHINE}} ${KERN_OPTS_EXTRA} .if ${MK_INET_SUPPORT} != "no" KERN_OPTS+= INET TCP_OFFLOAD diff --git a/sys/conf/options b/sys/conf/options index 4724a1a601c1..6d902001b543 100644 --- a/sys/conf/options +++ b/sys/conf/options @@ -908,7 +908,6 @@ HWPMC_MIPS_BACKTRACE opt_hwpmc_hooks.h # 802.11 support layer IEEE80211_DEBUG opt_wlan.h IEEE80211_DEBUG_REFCNT opt_wlan.h -IEEE80211_AMPDU_AGE opt_wlan.h IEEE80211_SUPPORT_MESH opt_wlan.h IEEE80211_SUPPORT_SUPERG opt_wlan.h IEEE80211_SUPPORT_TDMA opt_wlan.h diff --git a/sys/dev/cxgbe/t4_filter.c b/sys/dev/cxgbe/t4_filter.c index c1a232e78950..49a57684c42b 100644 --- a/sys/dev/cxgbe/t4_filter.c +++ b/sys/dev/cxgbe/t4_filter.c @@ -1229,6 +1229,7 @@ t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss, /* provide errno instead of tid to ioctl */ f->tid = act_open_rpl_status_to_errno(status); f->valid = 0; + f->pending = 0; if (act_open_has_tid(status)) release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]); free_filter_resources(f); @@ -1587,7 +1588,6 @@ set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple, f->locked = 0; t->idx = f->tid; } else { - remove_hf(sc, f); rc = f->tid; free(f, M_CXGBE); } diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c index 4e1eff45374a..fcdcf92add20 100644 --- a/sys/dev/cxgbe/t4_main.c +++ b/sys/dev/cxgbe/t4_main.c @@ -480,9 +480,10 @@ SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0, /* * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed, - * encouraged respectively). + * encouraged respectively). '-n' is the same as 'n' except the firmware + * version used in the checks is read from the firmware bundled with the driver. */ -static unsigned int t4_fw_install = 1; +static int t4_fw_install = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0, "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)"); @@ -3324,17 +3325,38 @@ cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq) V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) +/* Just enough of fw_hdr to cover all version info. */ +struct fw_h { + __u8 ver; + __u8 chip; + __be16 len512; + __be32 fw_ver; + __be32 tp_microcode_ver; + __u8 intfver_nic; + __u8 intfver_vnic; + __u8 intfver_ofld; + __u8 intfver_ri; + __u8 intfver_iscsipdu; + __u8 intfver_iscsi; + __u8 intfver_fcoepdu; + __u8 intfver_fcoe; +}; +/* Spot check a couple of fields. 
*/ +CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver)); +CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic)); +CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe)); + struct fw_info { uint8_t chip; char *kld_name; char *fw_mod_name; - struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ + struct fw_h fw_h; } fw_info[] = { { .chip = CHELSIO_T4, .kld_name = "t4fw_cfg", .fw_mod_name = "t4fw", - .fw_hdr = { + .fw_h = { .chip = FW_HDR_CHIP_T4, .fw_ver = htobe32(FW_VERSION(T4)), .intfver_nic = FW_INTFVER(T4, NIC), @@ -3350,7 +3372,7 @@ struct fw_info { .chip = CHELSIO_T5, .kld_name = "t5fw_cfg", .fw_mod_name = "t5fw", - .fw_hdr = { + .fw_h = { .chip = FW_HDR_CHIP_T5, .fw_ver = htobe32(FW_VERSION(T5)), .intfver_nic = FW_INTFVER(T5, NIC), @@ -3366,7 +3388,7 @@ struct fw_info { .chip = CHELSIO_T6, .kld_name = "t6fw_cfg", .fw_mod_name = "t6fw", - .fw_hdr = { + .fw_h = { .chip = FW_HDR_CHIP_T6, .fw_ver = htobe32(FW_VERSION(T6)), .intfver_nic = FW_INTFVER(T6, NIC), @@ -3398,7 +3420,7 @@ find_fw_info(int chip) * with? */ static int -fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) +fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2) { /* short circuit if it's the exact same firmware version */ @@ -3465,14 +3487,19 @@ unload_fw_module(struct adapter *sc, const struct firmware *dcfg, * +ve errno means a firmware install was attempted but failed. */ static int -install_kld_firmware(struct adapter *sc, struct fw_hdr *card_fw, - const struct fw_hdr *drv_fw, const char *reason, int *already) +install_kld_firmware(struct adapter *sc, struct fw_h *card_fw, + const struct fw_h *drv_fw, const char *reason, int *already) { const struct firmware *cfg, *fw; const uint32_t c = be32toh(card_fw->fw_ver); - const uint32_t d = be32toh(drv_fw->fw_ver); - uint32_t k; - int rc; + uint32_t d, k; + int rc, fw_install; + struct fw_h bundled_fw; + bool load_attempted; + + cfg = fw = NULL; + load_attempted = false; + fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install; if (reason != NULL) goto install; @@ -3487,7 +3514,23 @@ install_kld_firmware(struct adapter *sc, struct fw_hdr *card_fw, return (0); } - if (!fw_compatible(card_fw, drv_fw)) { + memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw)); + if (t4_fw_install < 0) { + rc = load_fw_module(sc, &cfg, &fw); + if (rc != 0 || fw == NULL) { + device_printf(sc->dev, + "failed to load firmware module: %d. cfg %p, fw %p;" + " will use compiled-in firmware version for" + "hw.cxgbe.fw_install checks.\n", + rc, cfg, fw); + } else { + memcpy(&bundled_fw, fw->data, sizeof(bundled_fw)); + } + load_attempted = true; + } + d = be32toh(bundled_fw.fw_ver); + + if (!fw_compatible(card_fw, &bundled_fw)) { reason = "incompatible or unusable"; goto install; } @@ -3497,25 +3540,64 @@ install_kld_firmware(struct adapter *sc, struct fw_hdr *card_fw, goto install; } - if (t4_fw_install == 2 && d != c) { + if (fw_install == 2 && d != c) { reason = "different than the version bundled with this driver"; goto install; } - return (0); + /* No reason to do anything to the firmware already on the card. 
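The struct fw_h spot checks above use CTASSERT with offsetof to prove at compile time that the trimmed header remains a byte-for-byte prefix of struct fw_hdr. The same technique in standalone form, with placeholder struct and field names rather than the cxgbe ones (spelled static_assert here; CTASSERT is the kernel equivalent):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct full_hdr {               /* complete layout, e.g. struct fw_hdr */
            uint8_t  ver;
            uint8_t  chip;
            uint16_t len512;
            uint32_t fw_ver;
            uint8_t  rest[500];
    };

    struct hdr_prefix {             /* trimmed copy used for version checks */
            uint8_t  ver;
            uint8_t  chip;
            uint16_t len512;
            uint32_t fw_ver;
    };

    /* Fails the build if the trimmed struct ever drifts out of sync. */
    static_assert(offsetof(struct hdr_prefix, fw_ver) ==
        offsetof(struct full_hdr, fw_ver),
        "hdr_prefix layout drifted from full_hdr");

The same hunk also makes t4_fw_install signed: a negative value is folded to its absolute value for the install policy, and additionally makes the compatibility checks use the version read from the firmware module bundled with the driver instead of the compiled-in constants.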
*/ + rc = 0; + goto done; install: + rc = 0; if ((*already)++) - return (0); + goto done; - if (t4_fw_install == 0) { + if (fw_install == 0) { device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " "but the driver is prohibited from installing a firmware " "on the card.\n", G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); - return (0); + goto done; + } + + /* + * We'll attempt to install a firmware. Load the module first (if it + * hasn't been loaded already). + */ + if (!load_attempted) { + rc = load_fw_module(sc, &cfg, &fw); + if (rc != 0 || fw == NULL) { + device_printf(sc->dev, + "failed to load firmware module: %d. cfg %p, fw %p\n", + rc, cfg, fw); + /* carry on */ + } + } + if (fw == NULL) { + device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " + "but the driver cannot take corrective action because it " + "is unable to load the firmware module.\n", + G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), + G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); + rc = sc->flags & FW_OK ? 0 : ENOENT; + goto done; + } + k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver); + if (k != d) { + MPASS(t4_fw_install > 0); + device_printf(sc->dev, + "firmware in KLD (%u.%u.%u.%u) is not what the driver was " + "expecting (%u.%u.%u.%u) and will not be used.\n", + G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), + G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k), + G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), + G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); + rc = sc->flags & FW_OK ? 0 : EINVAL; + goto done; } device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " @@ -3525,25 +3607,6 @@ install_kld_firmware(struct adapter *sc, struct fw_hdr *card_fw, G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); - rc = load_fw_module(sc, &cfg, &fw); - if (rc != 0 || fw == NULL) { - device_printf(sc->dev, - "failed to load firmware module: %d. cfg %p, fw %p\n", rc, - cfg, fw); - rc = sc->flags & FW_OK ? 0 : ENOENT; - goto done; - } - k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver); - if (k != d) { - device_printf(sc->dev, - "firmware in KLD (%u.%u.%u.%u) is not what the driver was " - "compiled with and will not be used.\n", - G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), - G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); - rc = sc->flags & FW_OK ? 
0 : EINVAL; - goto done; - } - rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); if (rc != 0) { device_printf(sc->dev, "failed to install firmware: %d\n", rc); @@ -3571,7 +3634,7 @@ contact_firmware(struct adapter *sc) enum dev_state state; struct fw_info *fw_info; struct fw_hdr *card_fw; /* fw on the card */ - const struct fw_hdr *drv_fw; /* fw bundled with the driver */ + const struct fw_h *drv_fw; fw_info = find_fw_info(chip_id(sc)); if (fw_info == NULL) { @@ -3580,7 +3643,7 @@ contact_firmware(struct adapter *sc) chip_id(sc)); return (EINVAL); } - drv_fw = &fw_info->fw_hdr; + drv_fw = &fw_info->fw_h; /* Read the header of the firmware on the card */ card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); @@ -3593,7 +3656,8 @@ contact_firmware(struct adapter *sc) goto done; } - rc = install_kld_firmware(sc, card_fw, drv_fw, NULL, &already); + rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL, + &already); if (rc == ERESTART) goto restart; if (rc != 0) @@ -3606,7 +3670,7 @@ contact_firmware(struct adapter *sc) "failed to connect to the firmware: %d, %d. " "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); #if 0 - if (install_kld_firmware(sc, card_fw, drv_fw, + if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, "not responding properly to HELLO", &already) == ERESTART) goto restart; #endif @@ -3617,7 +3681,8 @@ contact_firmware(struct adapter *sc) if (rc == sc->pf) { sc->flags |= MASTER_PF; - rc = install_kld_firmware(sc, card_fw, drv_fw, NULL, &already); + rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, + NULL, &already); if (rc == ERESTART) rc = 0; else if (rc != 0) diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c index f0c0d41fa2b1..b9eb87560769 100644 --- a/sys/dev/hwpmc/hwpmc_mod.c +++ b/sys/dev/hwpmc/hwpmc_mod.c @@ -3512,6 +3512,7 @@ pmc_syscall_handler(struct thread *td, void *syscall_args) struct pmc_classdep *pcd; int cl; + memset(&gci, 0, sizeof(gci)); gci.pm_cputype = md->pmd_cputype; gci.pm_ncpu = pmc_cpu_max(); gci.pm_npmc = md->pmd_npmc; @@ -3661,7 +3662,7 @@ pmc_syscall_handler(struct thread *td, void *syscall_args) npmc = md->pmd_npmc; pmcinfo_size = npmc * sizeof(struct pmc_info); - pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK); + pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK | M_ZERO); p = pmcinfo; diff --git a/sys/dev/iwm/if_iwm.c b/sys/dev/iwm/if_iwm.c index d80b22cf9dec..9bbf292bb74d 100644 --- a/sys/dev/iwm/if_iwm.c +++ b/sys/dev/iwm/if_iwm.c @@ -1033,7 +1033,8 @@ iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) * The hw rx ring index in shared memory must also be cleared, * otherwise the discrepancy can cause reprocessing chaos. 
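The hwpmc changes above (the memset() of gci and the extra M_ZERO on the pmc_info allocation) follow the standard rule for data that is later copied out to userland: zero the whole object first so structure padding and any fields left unset cannot leak kernel memory. A small userspace-flavored sketch of the pattern, with hypothetical names:

    #include <stdint.h>
    #include <string.h>

    struct op_info {
            uint32_t cputype;
            uint16_t ncpu;          /* padding typically follows this field */
            uint32_t npmc;
    };

    static void
    fill_op_info(struct op_info *gi)
    {
            /* Zero everything, including compiler-inserted padding. */
            memset(gi, 0, sizeof(*gi));
            gi->cputype = 1;
            gi->ncpu = 4;
            gi->npmc = 8;
            /* The structure is now safe to hand to an untrusted consumer. */
    }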
*/ - memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat)); + if (sc->rxq.stat) + memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat)); } static void diff --git a/sys/dev/iwn/if_iwn.c b/sys/dev/iwn/if_iwn.c index c46568332dc6..e1e521ecf677 100644 --- a/sys/dev/iwn/if_iwn.c +++ b/sys/dev/iwn/if_iwn.c @@ -131,8 +131,8 @@ static const struct iwn_ident iwn_ident_table[] = { static int iwn_probe(device_t); static int iwn_attach(device_t); -static int iwn4965_attach(struct iwn_softc *, uint16_t); -static int iwn5000_attach(struct iwn_softc *, uint16_t); +static void iwn4965_attach(struct iwn_softc *, uint16_t); +static void iwn5000_attach(struct iwn_softc *, uint16_t); static int iwn_config_specific(struct iwn_softc *, uint16_t); static void iwn_radiotap_attach(struct iwn_softc *); static void iwn_sysctlattach(struct iwn_softc *); @@ -495,14 +495,9 @@ iwn_attach(device_t dev) * Let's set those up first. */ if (sc->hw_type == IWN_HW_REV_TYPE_4965) - error = iwn4965_attach(sc, pci_get_device(dev)); + iwn4965_attach(sc, pci_get_device(dev)); else - error = iwn5000_attach(sc, pci_get_device(dev)); - if (error != 0) { - device_printf(dev, "could not attach device, error %d\n", - error); - goto fail; - } + iwn5000_attach(sc, pci_get_device(dev)); /* * Next, let's setup the various parameters of each NIC. @@ -1224,12 +1219,13 @@ iwn_config_specific(struct iwn_softc *sc, uint16_t pid) return 0; } -static int +static void iwn4965_attach(struct iwn_softc *sc, uint16_t pid) { struct iwn_ops *ops = &sc->ops; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); + ops->load_firmware = iwn4965_load_firmware; ops->read_eeprom = iwn4965_read_eeprom; ops->post_alive = iwn4965_post_alive; @@ -1264,11 +1260,9 @@ iwn4965_attach(struct iwn_softc *sc, uint16_t pid) sc->sc_flags |= IWN_FLAG_BTCOEX; DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__); - - return 0; } -static int +static void iwn5000_attach(struct iwn_softc *sc, uint16_t pid) { struct iwn_ops *ops = &sc->ops; @@ -1303,7 +1297,7 @@ iwn5000_attach(struct iwn_softc *sc, uint16_t pid) sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; - return 0; + DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__); } /* @@ -4593,10 +4587,6 @@ iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni) if (!IEEE80211_AMPDU_RUNNING(tap)) return (EINVAL); - /* NB: clear Fragment Number field. 
*/ - /* XXX move this to net80211 */ - *(uint16_t *)wh->i_seq = 0; - ac = *(int *)tap->txa_private; } diff --git a/sys/dev/ixgbe/ixgbe_82599.c b/sys/dev/ixgbe/ixgbe_82599.c index 946b3ed3839f..e92fb1292da2 100644 --- a/sys/dev/ixgbe/ixgbe_82599.c +++ b/sys/dev/ixgbe/ixgbe_82599.c @@ -1750,7 +1750,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, case 0x0000: /* mask VLAN ID */ fdirm |= IXGBE_FDIRM_VLANID; - /* fall through */ + /* FALLTHROUGH */ case 0x0FFF: /* mask VLAN priority */ fdirm |= IXGBE_FDIRM_VLANP; @@ -2039,7 +2039,7 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, DEBUGOUT(" Error on src/dst port\n"); return IXGBE_ERR_CONFIG; } - /* fall through */ + /* FALLTHROUGH */ case IXGBE_ATR_FLOW_TYPE_TCPV4: case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4: case IXGBE_ATR_FLOW_TYPE_UDPV4: diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c index b66c4be98871..7a39efa7787d 100644 --- a/sys/dev/ixgbe/ixgbe_common.c +++ b/sys/dev/ixgbe/ixgbe_common.c @@ -269,7 +269,8 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) if (ret_val != IXGBE_SUCCESS) goto out; - /* fall through - only backplane uses autoc */ + /* only backplane uses autoc */ + /* FALLTHROUGH */ case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: @@ -4756,7 +4757,8 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; for (; i < (num_pb / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); - /* fall through - configure remaining packet buffers */ + /* configure remaining packet buffers */ + /* FALLTHROUGH */ case PBA_STRATEGY_EQUAL: rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; for (; i < num_pb; i++) diff --git a/sys/dev/usb/quirk/usb_quirk.c b/sys/dev/usb/quirk/usb_quirk.c index 6a817a5d4eab..4506168cefcc 100644 --- a/sys/dev/usb/quirk/usb_quirk.c +++ b/sys/dev/usb/quirk/usb_quirk.c @@ -96,6 +96,7 @@ static struct usb_quirk_entry usb_quirks[USB_DEV_QUIRKS_MAX] = { USB_QUIRK(TELEX, MIC1, 0x009, 0x009, UQ_AU_NO_FRAC), USB_QUIRK(SILICONPORTALS, YAPPHONE, 0x100, 0x100, UQ_AU_INP_ASYNC), USB_QUIRK(LOGITECH, UN53B, 0x0000, 0xffff, UQ_NO_STRINGS), + USB_QUIRK(LOGITECH, G510S, 0x0000, 0xFFFF, UQ_KBD_BOOTPROTO), USB_QUIRK(REALTEK, RTL8196EU, 0x0000, 0xffff, UQ_CFG_INDEX_1), USB_QUIRK(ELSA, MODEM1, 0x0000, 0xffff, UQ_CFG_INDEX_1), USB_QUIRK(PLANEX2, MZKUE150N, 0x0000, 0xffff, UQ_CFG_INDEX_1), diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs index f4d24c8013f4..422021ba94f0 100644 --- a/sys/dev/usb/usbdevs +++ b/sys/dev/usb/usbdevs @@ -2842,6 +2842,7 @@ product LOGITECH UN58A 0xc030 iFeel Mouse product LOGITECH UN53B 0xc032 iFeel MouseMan product LOGITECH WMPAD 0xc208 WingMan GamePad Extreme product LOGITECH WMRPAD 0xc20a WingMan RumblePad +product LOGITECH G510S 0xc22d G510s Keyboard product LOGITECH WMJOY 0xc281 WingMan Force joystick product LOGITECH BB13 0xc401 USB-PS/2 Trackball product LOGITECH RK53 0xc501 Cordless mouse diff --git a/sys/dev/usb/wlan/if_run.c b/sys/dev/usb/wlan/if_run.c index c957feaeb7f5..22407183fd58 100644 --- a/sys/dev/usb/wlan/if_run.c +++ b/sys/dev/usb/wlan/if_run.c @@ -2824,69 +2824,80 @@ run_rx_frame(struct run_softc *sc, struct mbuf *m, uint32_t dmalen) uint8_t ant, rssi; int8_t nf; - rxwi = mtod(m, struct rt2860_rxwi *); - len = le16toh(rxwi->len) & 0xfff; rxwisize = sizeof(struct rt2860_rxwi); if (sc->mac_ver == 0x5592) rxwisize += sizeof(uint64_t); else if (sc->mac_ver == 0x3593) rxwisize += sizeof(uint32_t); - if 
(__predict_false(len > dmalen)) { - m_freem(m); - counter_u64_add(ic->ic_ierrors, 1); + + if (__predict_false(dmalen < + rxwisize + sizeof(struct ieee80211_frame_ack))) { + RUN_DPRINTF(sc, RUN_DEBUG_RECV, + "payload is too short: dma length %u < %zu\n", + dmalen, rxwisize + sizeof(struct ieee80211_frame_ack)); + goto fail; + } + + rxwi = mtod(m, struct rt2860_rxwi *); + len = le16toh(rxwi->len) & 0xfff; + + if (__predict_false(len > dmalen - rxwisize)) { RUN_DPRINTF(sc, RUN_DEBUG_RECV, "bad RXWI length %u > %u\n", len, dmalen); - return; + goto fail; } + /* Rx descriptor is located at the end */ rxd = (struct rt2870_rxd *)(mtod(m, caddr_t) + dmalen); flags = le32toh(rxd->flags); if (__predict_false(flags & (RT2860_RX_CRCERR | RT2860_RX_ICVERR))) { - m_freem(m); - counter_u64_add(ic->ic_ierrors, 1); RUN_DPRINTF(sc, RUN_DEBUG_RECV, "%s error.\n", (flags & RT2860_RX_CRCERR)?"CRC":"ICV"); - return; - } - - m->m_data += rxwisize; - m->m_pkthdr.len = m->m_len -= rxwisize; - - wh = mtod(m, struct ieee80211_frame *); - - if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { - wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; - m->m_flags |= M_WEP; + goto fail; } if (flags & RT2860_RX_L2PAD) { + /* + * XXX OpenBSD removes padding between header + * and payload here... + */ RUN_DPRINTF(sc, RUN_DEBUG_RECV, "received RT2860_RX_L2PAD frame\n"); len += 2; } - ni = ieee80211_find_rxnode(ic, - mtod(m, struct ieee80211_frame_min *)); + m->m_data += rxwisize; + m->m_pkthdr.len = m->m_len = len; + + wh = mtod(m, struct ieee80211_frame *); + + /* XXX wrong for monitor mode */ + if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { + wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; + m->m_flags |= M_WEP; + } + + if (len >= sizeof(struct ieee80211_frame_min)) { + ni = ieee80211_find_rxnode(ic, + mtod(m, struct ieee80211_frame_min *)); + } else + ni = NULL; if (__predict_false(flags & RT2860_RX_MICERR)) { /* report MIC failures to net80211 for TKIP */ if (ni != NULL) ieee80211_notify_michael_failure(ni->ni_vap, wh, rxwi->keyidx); - m_freem(m); - counter_u64_add(ic->ic_ierrors, 1); RUN_DPRINTF(sc, RUN_DEBUG_RECV, "MIC error. 
Someone is lying.\n"); - return; + goto fail; } ant = run_maxrssi_chain(sc, rxwi); rssi = rxwi->rssi[ant]; nf = run_rssi2dbm(sc, rssi, ant); - m->m_pkthdr.len = m->m_len = len; - if (__predict_false(ieee80211_radiotap_active(ic))) { struct run_rx_radiotap_header *tap = &sc->sc_rxtap; uint16_t phy; @@ -2934,6 +2945,12 @@ run_rx_frame(struct run_softc *sc, struct mbuf *m, uint32_t dmalen) } else { (void)ieee80211_input_all(ic, m, rssi, nf); } + + return; + +fail: + m_freem(m); + counter_u64_add(ic->ic_ierrors, 1); } static void @@ -2943,7 +2960,7 @@ run_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) struct ieee80211com *ic = &sc->sc_ic; struct mbuf *m = NULL; struct mbuf *m0; - uint32_t dmalen; + uint32_t dmalen, mbuf_len; uint16_t rxwisize; int xferlen; @@ -3049,6 +3066,14 @@ run_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) break; } + mbuf_len = dmalen + sizeof(struct rt2870_rxd); + if (__predict_false(mbuf_len > MCLBYTES)) { + RUN_DPRINTF(sc, RUN_DEBUG_RECV_DESC | RUN_DEBUG_USB, + "payload is too big: mbuf_len %u\n", mbuf_len); + counter_u64_add(ic->ic_ierrors, 1); + break; + } + /* copy aggregated frames to another mbuf */ m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (__predict_false(m0 == NULL)) { @@ -3058,14 +3083,13 @@ run_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) break; } m_copydata(m, 4 /* skip 32-bit DMA-len header */, - dmalen + sizeof(struct rt2870_rxd), mtod(m0, caddr_t)); - m0->m_pkthdr.len = m0->m_len = - dmalen + sizeof(struct rt2870_rxd); + mbuf_len, mtod(m0, caddr_t)); + m0->m_pkthdr.len = m0->m_len = mbuf_len; run_rx_frame(sc, m0, dmalen); /* update data ptr */ - m->m_data += dmalen + 8; - m->m_pkthdr.len = m->m_len -= dmalen + 8; + m->m_data += mbuf_len + 4; + m->m_pkthdr.len = m->m_len -= mbuf_len + 4; } /* make sure we free the source buffer, if any */ diff --git a/sys/dev/usb/wlan/if_urtw.c b/sys/dev/usb/wlan/if_urtw.c index 8685c3957f1f..418d8b5089a5 100644 --- a/sys/dev/usb/wlan/if_urtw.c +++ b/sys/dev/usb/wlan/if_urtw.c @@ -3933,21 +3933,18 @@ urtw_rxeof(struct usb_xfer *xfer, struct urtw_data *data, int *rssi_p, usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); - if (actlen < (int)URTW_MIN_RXBUFSZ) { - counter_u64_add(ic->ic_ierrors, 1); - return (NULL); - } - if (sc->sc_flags & URTW_RTL8187B) { struct urtw_8187b_rxhdr *rx; + if (actlen < sizeof(*rx) + IEEE80211_ACK_LEN) + goto fail; + rx = (struct urtw_8187b_rxhdr *)(data->buf + (actlen - (sizeof(struct urtw_8187b_rxhdr)))); flen = le32toh(rx->flag) & 0xfff; - if (flen > actlen) { - counter_u64_add(ic->ic_ierrors, 1); - return (NULL); - } + if (flen > actlen - sizeof(*rx)) + goto fail; + rate = (le32toh(rx->flag) >> URTW_RX_FLAG_RXRATE_SHIFT) & 0xf; /* XXX correct? */ rssi = rx->rssi & URTW_RX_RSSI_MASK; @@ -3955,13 +3952,14 @@ urtw_rxeof(struct usb_xfer *xfer, struct urtw_data *data, int *rssi_p, } else { struct urtw_8187l_rxhdr *rx; + if (actlen < sizeof(*rx) + IEEE80211_ACK_LEN) + goto fail; + rx = (struct urtw_8187l_rxhdr *)(data->buf + (actlen - (sizeof(struct urtw_8187l_rxhdr)))); flen = le32toh(rx->flag) & 0xfff; - if (flen > actlen) { - counter_u64_add(ic->ic_ierrors, 1); - return (NULL); - } + if (flen > actlen - sizeof(*rx)) + goto fail; rate = (le32toh(rx->flag) >> URTW_RX_FLAG_RXRATE_SHIFT) & 0xf; /* XXX correct? 
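The if_run and if_urtw receive paths above are reworked around the same idea: validate the transfer length against every header that is about to be dereferenced, reject descriptor lengths that exceed the remaining payload, and funnel all error cases through a single fail: label that frees the buffer and bumps ic_ierrors. A condensed, self-contained sketch of that shape; the sizes and names are illustrative, not the drivers':

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define HDR_LEN   16            /* descriptor prepended by the device */
    #define MIN_FRAME 10            /* smallest frame worth processing */

    static unsigned long rx_errors;

    static void
    rx_frame(uint8_t *buf, size_t xfer_len, size_t claimed_len)
    {
            if (xfer_len < HDR_LEN + MIN_FRAME)
                    goto fail;      /* too short for header plus frame */
            if (claimed_len > xfer_len - HDR_LEN)
                    goto fail;      /* descriptor length exceeds transfer */

            /* ...hand buf + HDR_LEN, claimed_len bytes, to the stack... */
            free(buf);
            return;

    fail:
            free(buf);              /* m_freem() in the real drivers */
            rx_errors++;            /* counter_u64_add(ic->ic_ierrors, 1) */
    }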
*/ @@ -3969,11 +3967,12 @@ urtw_rxeof(struct usb_xfer *xfer, struct urtw_data *data, int *rssi_p, noise = rx->noise; } + if (flen < IEEE80211_ACK_LEN) + goto fail; + mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); - if (mnew == NULL) { - counter_u64_add(ic->ic_ierrors, 1); - return (NULL); - } + if (mnew == NULL) + goto fail; m = data->m; data->m = mnew; @@ -3992,13 +3991,17 @@ urtw_rxeof(struct usb_xfer *xfer, struct urtw_data *data, int *rssi_p, } wh = mtod(m, struct ieee80211_frame *); - if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA) + if (IEEE80211_IS_DATA(wh)) sc->sc_currate = (rate > 0) ? rate : sc->sc_currate; *rssi_p = rssi; *nf_p = noise; /* XXX correct? */ return (m); + +fail: + counter_u64_add(ic->ic_ierrors, 1); + return (NULL); } static void @@ -4006,7 +4009,6 @@ urtw_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct urtw_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; - struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *m = NULL; struct urtw_data *data; @@ -4044,9 +4046,13 @@ urtw_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) */ URTW_UNLOCK(sc); if (m != NULL) { - wh = mtod(m, struct ieee80211_frame *); - ni = ieee80211_find_rxnode(ic, - (struct ieee80211_frame_min *)wh); + if (m->m_pkthdr.len >= + sizeof(struct ieee80211_frame_min)) { + ni = ieee80211_find_rxnode(ic, + mtod(m, struct ieee80211_frame_min *)); + } else + ni = NULL; + if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); /* node is no longer needed */ diff --git a/sys/dev/usb/wlan/if_urtwvar.h b/sys/dev/usb/wlan/if_urtwvar.h index 08ffc8f3b303..1b5e2cd17a80 100644 --- a/sys/dev/usb/wlan/if_urtwvar.h +++ b/sys/dev/usb/wlan/if_urtwvar.h @@ -47,10 +47,6 @@ struct urtw_data { }; typedef STAILQ_HEAD(, urtw_data) urtw_datahead; -/* XXX not correct.. 
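The urtw completion handler above also stops assuming a received mbuf is big enough for a node lookup: ieee80211_find_rxnode() is only called once at least a struct ieee80211_frame_min is present, and the frame still goes through the normal input paths either way. The guard, condensed from the diff (a sketch of the driver-side pattern, not standalone code):

    if (m->m_pkthdr.len >= sizeof(struct ieee80211_frame_min)) {
            ni = ieee80211_find_rxnode(ic,
                mtod(m, struct ieee80211_frame_min *));
    } else
            ni = NULL;

    if (ni != NULL) {
            (void)ieee80211_input(ni, m, rssi, nf);
            ieee80211_free_node(ni);        /* node is no longer needed */
    } else
            (void)ieee80211_input_all(ic, m, rssi, nf);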
*/ -#define URTW_MIN_RXBUFSZ \ - (sizeof(struct ieee80211_frame_min)) - #define URTW_RX_DATA_LIST_COUNT 4 #define URTW_TX_DATA_LIST_COUNT 16 #define URTW_RX_MAXSIZE 0x9c4 diff --git a/sys/dev/vmware/vmxnet3/if_vmx.c b/sys/dev/vmware/vmxnet3/if_vmx.c index b3cb9df49821..38b61d3a7e8a 100644 --- a/sys/dev/vmware/vmxnet3/if_vmx.c +++ b/sys/dev/vmware/vmxnet3/if_vmx.c @@ -1,6 +1,7 @@ /*- * Copyright (c) 2013 Tsubai Masanari * Copyright (c) 2013 Bryan Venteicher + * Copyright (c) 2018 Patrick Kelsey * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -24,7 +25,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include #include @@ -34,7 +34,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include @@ -46,8 +45,7 @@ __FBSDID("$FreeBSD$"); #include #include #include - -#include +#include #include #include @@ -57,8 +55,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include - #include #include #include @@ -67,139 +63,95 @@ __FBSDID("$FreeBSD$"); #include #include +#include "ifdi_if.h" + #include "if_vmxreg.h" #include "if_vmxvar.h" #include "opt_inet.h" #include "opt_inet6.h" -#ifdef VMXNET3_FAILPOINTS -#include -static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0, - "vmxnet3 fail points"); -#define VMXNET3_FP _debug_fail_point_vmxnet3 -#endif -static int vmxnet3_probe(device_t); -static int vmxnet3_attach(device_t); -static int vmxnet3_detach(device_t); -static int vmxnet3_shutdown(device_t); +#define VMXNET3_VMWARE_VENDOR_ID 0x15AD +#define VMXNET3_VMWARE_DEVICE_ID 0x07B0 + +static pci_vendor_info_t vmxnet3_vendor_info_array[] = +{ + PVID(VMXNET3_VMWARE_VENDOR_ID, VMXNET3_VMWARE_DEVICE_ID, "VMware VMXNET3 Ethernet Adapter"), + /* required last entry */ + PVID_END +}; + +static void *vmxnet3_register(device_t); +static int vmxnet3_attach_pre(if_ctx_t); +static int vmxnet3_msix_intr_assign(if_ctx_t, int); +static void vmxnet3_free_irqs(struct vmxnet3_softc *); +static int vmxnet3_attach_post(if_ctx_t); +static int vmxnet3_detach(if_ctx_t); +static int vmxnet3_shutdown(if_ctx_t); +static int vmxnet3_suspend(if_ctx_t); +static int vmxnet3_resume(if_ctx_t); static int vmxnet3_alloc_resources(struct vmxnet3_softc *); static void vmxnet3_free_resources(struct vmxnet3_softc *); static int vmxnet3_check_version(struct vmxnet3_softc *); -static void vmxnet3_initial_config(struct vmxnet3_softc *); -static void vmxnet3_check_multiqueue(struct vmxnet3_softc *); +static void vmxnet3_set_interrupt_idx(struct vmxnet3_softc *); -static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *); -static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *); -static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *); -static int vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int, - struct vmxnet3_interrupt *); -static int vmxnet3_alloc_intr_resources(struct vmxnet3_softc *); -static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *); -static int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *); -static int vmxnet3_setup_interrupts(struct vmxnet3_softc *); -static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *); - -static void vmxnet3_free_interrupt(struct vmxnet3_softc *, - struct vmxnet3_interrupt *); -static void vmxnet3_free_interrupts(struct vmxnet3_softc *); - -#ifndef VMXNET3_LEGACY_TX -static int vmxnet3_alloc_taskqueue(struct vmxnet3_softc *); -static void vmxnet3_start_taskqueue(struct vmxnet3_softc *); -static void 
vmxnet3_drain_taskqueue(struct vmxnet3_softc *); -static void vmxnet3_free_taskqueue(struct vmxnet3_softc *); -#endif - -static int vmxnet3_init_rxq(struct vmxnet3_softc *, int); -static int vmxnet3_init_txq(struct vmxnet3_softc *, int); -static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *); -static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *); -static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *); -static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *); +static int vmxnet3_queues_shared_alloc(struct vmxnet3_softc *); +static void vmxnet3_init_txq(struct vmxnet3_softc *, int); +static int vmxnet3_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); +static void vmxnet3_init_rxq(struct vmxnet3_softc *, int, int); +static int vmxnet3_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); +static void vmxnet3_queues_free(if_ctx_t); static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *); static void vmxnet3_free_shared_data(struct vmxnet3_softc *); -static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *); -static void vmxnet3_free_txq_data(struct vmxnet3_softc *); -static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *); -static void vmxnet3_free_rxq_data(struct vmxnet3_softc *); -static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *); -static void vmxnet3_free_queue_data(struct vmxnet3_softc *); static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *); +static void vmxnet3_free_mcast_table(struct vmxnet3_softc *); static void vmxnet3_init_shared_data(struct vmxnet3_softc *); -static void vmxnet3_init_hwassist(struct vmxnet3_softc *); -static void vmxnet3_reinit_interface(struct vmxnet3_softc *); static void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *); static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *); static int vmxnet3_alloc_data(struct vmxnet3_softc *); static void vmxnet3_free_data(struct vmxnet3_softc *); -static int vmxnet3_setup_interface(struct vmxnet3_softc *); static void vmxnet3_evintr(struct vmxnet3_softc *); -static void vmxnet3_txq_eof(struct vmxnet3_txqueue *); -static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *); -static int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *); -static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *, - struct vmxnet3_rxring *, int); -static void vmxnet3_rxq_eof(struct vmxnet3_rxqueue *); -static void vmxnet3_legacy_intr(void *); -static void vmxnet3_txq_intr(void *); -static void vmxnet3_rxq_intr(void *); -static void vmxnet3_event_intr(void *); +static int vmxnet3_isc_txd_encap(void *, if_pkt_info_t); +static void vmxnet3_isc_txd_flush(void *, uint16_t, qidx_t); +static int vmxnet3_isc_txd_credits_update(void *, uint16_t, bool); +static int vmxnet3_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t); +static int vmxnet3_isc_rxd_pkt_get(void *, if_rxd_info_t); +static void vmxnet3_isc_rxd_refill(void *, if_rxd_update_t); +static void vmxnet3_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t); +static int vmxnet3_legacy_intr(void *); +static int vmxnet3_rxq_intr(void *); +static int vmxnet3_event_intr(void *); -static void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *); -static void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *); -static void vmxnet3_stop(struct vmxnet3_softc *); +static void vmxnet3_stop(if_ctx_t); static void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *); -static int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *); -static int 
vmxnet3_reinit_queues(struct vmxnet3_softc *); +static void vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *); +static void vmxnet3_reinit_queues(struct vmxnet3_softc *); static int vmxnet3_enable_device(struct vmxnet3_softc *); static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *); -static int vmxnet3_reinit(struct vmxnet3_softc *); -static void vmxnet3_init_locked(struct vmxnet3_softc *); -static void vmxnet3_init(void *); - -static int vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *,struct mbuf *, - int *, int *, int *); -static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **, - bus_dmamap_t, bus_dma_segment_t [], int *); -static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t); -static int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **); -#ifdef VMXNET3_LEGACY_TX -static void vmxnet3_start_locked(struct ifnet *); -static void vmxnet3_start(struct ifnet *); -#else -static int vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *, - struct mbuf *); -static int vmxnet3_txq_mq_start(struct ifnet *, struct mbuf *); -static void vmxnet3_txq_tq_deferred(void *, int); -#endif -static void vmxnet3_txq_start(struct vmxnet3_txqueue *); -static void vmxnet3_tx_start_all(struct vmxnet3_softc *); +static void vmxnet3_init(if_ctx_t); +static void vmxnet3_multi_set(if_ctx_t); +static int vmxnet3_mtu_set(if_ctx_t, uint32_t); +static void vmxnet3_media_status(if_ctx_t, struct ifmediareq *); +static int vmxnet3_media_change(if_ctx_t); +static int vmxnet3_promisc_set(if_ctx_t, int); +static uint64_t vmxnet3_get_counter(if_ctx_t, ift_counter); +static void vmxnet3_update_admin_status(if_ctx_t); +static void vmxnet3_txq_timer(if_ctx_t, uint16_t); static void vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int, uint16_t); -static void vmxnet3_register_vlan(void *, struct ifnet *, uint16_t); -static void vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t); -static void vmxnet3_set_rxfilter(struct vmxnet3_softc *); -static int vmxnet3_change_mtu(struct vmxnet3_softc *, int); -static int vmxnet3_ioctl(struct ifnet *, u_long, caddr_t); -static uint64_t vmxnet3_get_counter(struct ifnet *, ift_counter); +static void vmxnet3_vlan_register(if_ctx_t, uint16_t); +static void vmxnet3_vlan_unregister(if_ctx_t, uint16_t); +static void vmxnet3_set_rxfilter(struct vmxnet3_softc *, int); -#ifndef VMXNET3_LEGACY_TX -static void vmxnet3_qflush(struct ifnet *); -#endif - -static int vmxnet3_watchdog(struct vmxnet3_txqueue *); static void vmxnet3_refresh_host_stats(struct vmxnet3_softc *); -static void vmxnet3_tick(void *); +static int vmxnet3_link_is_up(struct vmxnet3_softc *); static void vmxnet3_link_status(struct vmxnet3_softc *); -static void vmxnet3_media_status(struct ifnet *, struct ifmediareq *); -static int vmxnet3_media_change(struct ifnet *); static void vmxnet3_set_lladdr(struct vmxnet3_softc *); static void vmxnet3_get_lladdr(struct vmxnet3_softc *); @@ -219,17 +171,13 @@ static void vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t, static void vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t); static uint32_t vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t); +static int vmxnet3_tx_queue_intr_enable(if_ctx_t, uint16_t); +static int vmxnet3_rx_queue_intr_enable(if_ctx_t, uint16_t); +static void vmxnet3_link_intr_enable(if_ctx_t); static void vmxnet3_enable_intr(struct vmxnet3_softc *, int); static void vmxnet3_disable_intr(struct vmxnet3_softc *, int); -static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *); -static 
void vmxnet3_disable_all_intrs(struct vmxnet3_softc *); - -static int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t, - bus_size_t, struct vmxnet3_dma_alloc *); -static void vmxnet3_dma_free(struct vmxnet3_softc *, - struct vmxnet3_dma_alloc *); -static int vmxnet3_tunable_int(struct vmxnet3_softc *, - const char *, int); +static void vmxnet3_intr_enable_all(if_ctx_t); +static void vmxnet3_intr_disable_all(if_ctx_t); typedef enum { VMXNET3_BARRIER_RD, @@ -239,25 +187,16 @@ typedef enum { static void vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t); -/* Tunables. */ -static int vmxnet3_mq_disable = 0; -TUNABLE_INT("hw.vmx.mq_disable", &vmxnet3_mq_disable); -static int vmxnet3_default_txnqueue = VMXNET3_DEF_TX_QUEUES; -TUNABLE_INT("hw.vmx.txnqueue", &vmxnet3_default_txnqueue); -static int vmxnet3_default_rxnqueue = VMXNET3_DEF_RX_QUEUES; -TUNABLE_INT("hw.vmx.rxnqueue", &vmxnet3_default_rxnqueue); -static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC; -TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc); -static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC; -TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc); static device_method_t vmxnet3_methods[] = { - /* Device interface. */ - DEVMETHOD(device_probe, vmxnet3_probe), - DEVMETHOD(device_attach, vmxnet3_attach), - DEVMETHOD(device_detach, vmxnet3_detach), - DEVMETHOD(device_shutdown, vmxnet3_shutdown), - + /* Device interface */ + DEVMETHOD(device_register, vmxnet3_register), + DEVMETHOD(device_probe, iflib_device_probe), + DEVMETHOD(device_attach, iflib_device_attach), + DEVMETHOD(device_detach, iflib_device_detach), + DEVMETHOD(device_shutdown, iflib_device_shutdown), + DEVMETHOD(device_suspend, iflib_device_suspend), + DEVMETHOD(device_resume, iflib_device_resume), DEVMETHOD_END }; @@ -267,147 +206,376 @@ static driver_t vmxnet3_driver = { static devclass_t vmxnet3_devclass; DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0); +IFLIB_PNP_INFO(pci, vmx, vmxnet3_vendor_info_array); +MODULE_VERSION(vmx, 2); MODULE_DEPEND(vmx, pci, 1, 1, 1); MODULE_DEPEND(vmx, ether, 1, 1, 1); +MODULE_DEPEND(vmx, iflib, 1, 1, 1); -#define VMXNET3_VMWARE_VENDOR_ID 0x15AD -#define VMXNET3_VMWARE_DEVICE_ID 0x07B0 +static device_method_t vmxnet3_iflib_methods[] = { + DEVMETHOD(ifdi_tx_queues_alloc, vmxnet3_tx_queues_alloc), + DEVMETHOD(ifdi_rx_queues_alloc, vmxnet3_rx_queues_alloc), + DEVMETHOD(ifdi_queues_free, vmxnet3_queues_free), -static int -vmxnet3_probe(device_t dev) + DEVMETHOD(ifdi_attach_pre, vmxnet3_attach_pre), + DEVMETHOD(ifdi_attach_post, vmxnet3_attach_post), + DEVMETHOD(ifdi_detach, vmxnet3_detach), + + DEVMETHOD(ifdi_init, vmxnet3_init), + DEVMETHOD(ifdi_stop, vmxnet3_stop), + DEVMETHOD(ifdi_multi_set, vmxnet3_multi_set), + DEVMETHOD(ifdi_mtu_set, vmxnet3_mtu_set), + DEVMETHOD(ifdi_media_status, vmxnet3_media_status), + DEVMETHOD(ifdi_media_change, vmxnet3_media_change), + DEVMETHOD(ifdi_promisc_set, vmxnet3_promisc_set), + DEVMETHOD(ifdi_get_counter, vmxnet3_get_counter), + DEVMETHOD(ifdi_update_admin_status, vmxnet3_update_admin_status), + DEVMETHOD(ifdi_timer, vmxnet3_txq_timer), + + DEVMETHOD(ifdi_tx_queue_intr_enable, vmxnet3_tx_queue_intr_enable), + DEVMETHOD(ifdi_rx_queue_intr_enable, vmxnet3_rx_queue_intr_enable), + DEVMETHOD(ifdi_link_intr_enable, vmxnet3_link_intr_enable), + DEVMETHOD(ifdi_intr_enable, vmxnet3_intr_enable_all), + DEVMETHOD(ifdi_intr_disable, vmxnet3_intr_disable_all), + DEVMETHOD(ifdi_msix_intr_assign, vmxnet3_msix_intr_assign), + + DEVMETHOD(ifdi_vlan_register, 
vmxnet3_vlan_register), + DEVMETHOD(ifdi_vlan_unregister, vmxnet3_vlan_unregister), + + DEVMETHOD(ifdi_shutdown, vmxnet3_shutdown), + DEVMETHOD(ifdi_suspend, vmxnet3_suspend), + DEVMETHOD(ifdi_resume, vmxnet3_resume), + + DEVMETHOD_END +}; + +static driver_t vmxnet3_iflib_driver = { + "vmx", vmxnet3_iflib_methods, sizeof(struct vmxnet3_softc) +}; + +struct if_txrx vmxnet3_txrx = { + .ift_txd_encap = vmxnet3_isc_txd_encap, + .ift_txd_flush = vmxnet3_isc_txd_flush, + .ift_txd_credits_update = vmxnet3_isc_txd_credits_update, + .ift_rxd_available = vmxnet3_isc_rxd_available, + .ift_rxd_pkt_get = vmxnet3_isc_rxd_pkt_get, + .ift_rxd_refill = vmxnet3_isc_rxd_refill, + .ift_rxd_flush = vmxnet3_isc_rxd_flush, + .ift_legacy_intr = vmxnet3_legacy_intr +}; + +static struct if_shared_ctx vmxnet3_sctx_init = { + .isc_magic = IFLIB_MAGIC, + .isc_q_align = 512, + + .isc_tx_maxsize = VMXNET3_TX_MAXSIZE, + .isc_tx_maxsegsize = VMXNET3_TX_MAXSEGSIZE, + .isc_tso_maxsize = VMXNET3_TSO_MAXSIZE + sizeof(struct ether_vlan_header), + .isc_tso_maxsegsize = VMXNET3_TX_MAXSEGSIZE, + + /* + * These values are used to configure the busdma tag used for + * receive descriptors. Each receive descriptor only points to one + * buffer. + */ + .isc_rx_maxsize = VMXNET3_RX_MAXSEGSIZE, /* One buf per descriptor */ + .isc_rx_nsegments = 1, /* One mapping per descriptor */ + .isc_rx_maxsegsize = VMXNET3_RX_MAXSEGSIZE, + + .isc_admin_intrcnt = 1, + .isc_vendor_info = vmxnet3_vendor_info_array, + .isc_driver_version = "2", + .isc_driver = &vmxnet3_iflib_driver, + .isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ, + + /* + * Number of receive queues per receive queue set, with associated + * descriptor settings for each. + */ + .isc_nrxqs = 3, + .isc_nfl = 2, /* one free list for each receive command queue */ + .isc_nrxd_min = {VMXNET3_MIN_RX_NDESC, VMXNET3_MIN_RX_NDESC, VMXNET3_MIN_RX_NDESC}, + .isc_nrxd_max = {VMXNET3_MAX_RX_NDESC, VMXNET3_MAX_RX_NDESC, VMXNET3_MAX_RX_NDESC}, + .isc_nrxd_default = {VMXNET3_DEF_RX_NDESC, VMXNET3_DEF_RX_NDESC, VMXNET3_DEF_RX_NDESC}, + + /* + * Number of transmit queues per transmit queue set, with associated + * descriptor settings for each. 
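The vmxnet3 rewrite above moves the driver onto iflib: the PCI device methods forward to iflib_device_*, device_register() returns a struct if_shared_ctx describing queue geometry, the ifdi_* DEVMETHODs supply the control-plane entry points, and the data path becomes a struct if_txrx of descriptor callbacks. A heavily trimmed sketch of what that registration skeleton looks like for a hypothetical driver (the foo_* names are placeholders and the callback bodies are omitted):

    static struct if_txrx foo_txrx = {
            .ift_txd_encap          = foo_txd_encap,
            .ift_txd_flush          = foo_txd_flush,
            .ift_txd_credits_update = foo_txd_credits_update,
            .ift_rxd_available      = foo_rxd_available,
            .ift_rxd_pkt_get        = foo_rxd_pkt_get,
            .ift_rxd_refill         = foo_rxd_refill,
            .ift_rxd_flush          = foo_rxd_flush,
            .ift_legacy_intr        = foo_legacy_intr,
    };

    static struct if_shared_ctx foo_sctx = {
            .isc_magic       = IFLIB_MAGIC,
            .isc_ntxqs       = 2,           /* command + completion ring */
            .isc_nrxqs       = 3,           /* completion + two command rings */
            .isc_vendor_info = foo_vendor_info_array,
            .isc_driver      = &foo_iflib_driver,
    };

    static void *
    foo_register(device_t dev)
    {
            /*
             * iflib drives probe/attach; the driver only hands back the
             * description of itself.
             */
            return (&foo_sctx);
    }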
+ */ + .isc_ntxqs = 2, + .isc_ntxd_min = {VMXNET3_MIN_TX_NDESC, VMXNET3_MIN_TX_NDESC}, + .isc_ntxd_max = {VMXNET3_MAX_TX_NDESC, VMXNET3_MAX_TX_NDESC}, + .isc_ntxd_default = {VMXNET3_DEF_TX_NDESC, VMXNET3_DEF_TX_NDESC}, +}; + +static void * +vmxnet3_register(device_t dev) { - - if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID && - pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) { - device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter"); - return (BUS_PROBE_DEFAULT); - } - - return (ENXIO); + return (&vmxnet3_sctx_init); } static int -vmxnet3_attach(device_t dev) +vmxnet3_attach_pre(if_ctx_t ctx) { + device_t dev; + if_softc_ctx_t scctx; struct vmxnet3_softc *sc; + uint32_t intr_config; int error; - sc = device_get_softc(dev); + dev = iflib_get_dev(ctx); + sc = iflib_get_softc(ctx); sc->vmx_dev = dev; + sc->vmx_ctx = ctx; + sc->vmx_sctx = iflib_get_sctx(ctx); + sc->vmx_scctx = iflib_get_softc_ctx(ctx); + sc->vmx_ifp = iflib_get_ifp(ctx); + sc->vmx_media = iflib_get_media(ctx); + scctx = sc->vmx_scctx; - pci_enable_busmaster(dev); + scctx->isc_tx_nsegments = VMXNET3_TX_MAXSEGS; + scctx->isc_tx_tso_segments_max = VMXNET3_TX_MAXSEGS; + /* isc_tx_tso_size_max doesn't include possible vlan header */ + scctx->isc_tx_tso_size_max = VMXNET3_TSO_MAXSIZE; + scctx->isc_tx_tso_segsize_max = VMXNET3_TX_MAXSEGSIZE; + scctx->isc_txrx = &vmxnet3_txrx; - VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev)); - callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0); + /* If 0, the iflib tunable was not set, so set to the default */ + if (scctx->isc_nrxqsets == 0) + scctx->isc_nrxqsets = VMXNET3_DEF_RX_QUEUES; + scctx->isc_nrxqsets_max = min(VMXNET3_MAX_RX_QUEUES, mp_ncpus); - vmxnet3_initial_config(sc); + /* If 0, the iflib tunable was not set, so set to the default */ + if (scctx->isc_ntxqsets == 0) + scctx->isc_ntxqsets = VMXNET3_DEF_TX_QUEUES; + scctx->isc_ntxqsets_max = min(VMXNET3_MAX_TX_QUEUES, mp_ncpus); + /* + * Enforce that the transmit completion queue descriptor count is + * the same as the transmit command queue descriptor count. + */ + scctx->isc_ntxd[0] = scctx->isc_ntxd[1]; + scctx->isc_txqsizes[0] = + sizeof(struct vmxnet3_txcompdesc) * scctx->isc_ntxd[0]; + scctx->isc_txqsizes[1] = + sizeof(struct vmxnet3_txdesc) * scctx->isc_ntxd[1]; + + /* + * Enforce that the receive completion queue descriptor count is the + * sum of the receive command queue descriptor counts, and that the + * second receive command queue descriptor count is the same as the + * first one. + */ + scctx->isc_nrxd[2] = scctx->isc_nrxd[1]; + scctx->isc_nrxd[0] = scctx->isc_nrxd[1] + scctx->isc_nrxd[2]; + scctx->isc_rxqsizes[0] = + sizeof(struct vmxnet3_rxcompdesc) * scctx->isc_nrxd[0]; + scctx->isc_rxqsizes[1] = + sizeof(struct vmxnet3_rxdesc) * scctx->isc_nrxd[1]; + scctx->isc_rxqsizes[2] = + sizeof(struct vmxnet3_rxdesc) * scctx->isc_nrxd[2]; + + scctx->isc_rss_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE; + + /* Map PCI BARs */ error = vmxnet3_alloc_resources(sc); if (error) goto fail; + /* Check device versions */ error = vmxnet3_check_version(sc); if (error) goto fail; - error = vmxnet3_alloc_rxtx_queues(sc); - if (error) - goto fail; + /* + * The interrupt mode can be set in the hypervisor configuration via + * the parameter ethernet.intrMode. 
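vmxnet3_attach_pre() above derives the iflib descriptor counts from the hardware's constraints: the transmit completion ring must be the same size as the transmit command ring, the two receive command rings must match each other, and the single receive completion ring must cover both of them. That arithmetic, lifted into a tiny standalone check (array indexes follow the diff; the values are just examples):

    #include <assert.h>
    #include <stdio.h>

    int
    main(void)
    {
            int ntxd[2], nrxd[3];

            ntxd[1] = 512;                  /* TX command ring (tunable) */
            nrxd[1] = 512;                  /* first RX command ring (tunable) */

            ntxd[0] = ntxd[1];              /* TX completion ring mirrors it */
            nrxd[2] = nrxd[1];              /* second RX command ring matches */
            nrxd[0] = nrxd[1] + nrxd[2];    /* RX completion ring covers both */

            assert(ntxd[0] == 512 && nrxd[0] == 1024);
            printf("txc=%d txd=%d rxc=%d rxd1=%d rxd2=%d\n",
                ntxd[0], ntxd[1], nrxd[0], nrxd[1], nrxd[2]);
            return (0);
    }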
+ */ + intr_config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG); + sc->vmx_intr_mask_mode = (intr_config >> 2) & 0x03; -#ifndef VMXNET3_LEGACY_TX - error = vmxnet3_alloc_taskqueue(sc); - if (error) - goto fail; -#endif - - error = vmxnet3_alloc_interrupts(sc); - if (error) - goto fail; - - vmxnet3_check_multiqueue(sc); - - error = vmxnet3_alloc_data(sc); - if (error) - goto fail; - - error = vmxnet3_setup_interface(sc); - if (error) - goto fail; - - error = vmxnet3_setup_interrupts(sc); - if (error) { - ether_ifdetach(sc->vmx_ifp); - device_printf(dev, "could not set up interrupt\n"); - goto fail; + /* + * Configure the softc context to attempt to configure the interrupt + * mode now indicated by intr_config. iflib will follow the usual + * fallback path MSIX -> MSI -> LEGACY, starting at the configured + * starting mode. + */ + switch (intr_config & 0x03) { + case VMXNET3_IT_AUTO: + case VMXNET3_IT_MSIX: + scctx->isc_msix_bar = pci_msix_table_bar(dev); + break; + case VMXNET3_IT_MSI: + scctx->isc_msix_bar = -1; + scctx->isc_disable_msix = 1; + break; + case VMXNET3_IT_LEGACY: + scctx->isc_msix_bar = 0; + break; } - vmxnet3_setup_sysctl(sc); -#ifndef VMXNET3_LEGACY_TX - vmxnet3_start_taskqueue(sc); -#endif + scctx->isc_tx_csum_flags = VMXNET3_CSUM_ALL_OFFLOAD; + scctx->isc_capabilities = scctx->isc_capenable = + IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | + IFCAP_TSO4 | IFCAP_TSO6 | + IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | + IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | + IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO | + IFCAP_JUMBO_MTU; + /* These capabilities are not enabled by default. */ + scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER; + + vmxnet3_get_lladdr(sc); + iflib_set_mac(ctx, sc->vmx_lladdr); + + return (0); fail: - if (error) - vmxnet3_detach(dev); + /* + * We must completely clean up anything allocated above as iflib + * will not invoke any other driver entry points as a result of this + * failure. + */ + vmxnet3_free_resources(sc); return (error); } static int -vmxnet3_detach(device_t dev) +vmxnet3_msix_intr_assign(if_ctx_t ctx, int msix) { struct vmxnet3_softc *sc; - struct ifnet *ifp; + if_softc_ctx_t scctx; + struct vmxnet3_rxqueue *rxq; + int error; + int i; + char irq_name[16]; - sc = device_get_softc(dev); - ifp = sc->vmx_ifp; + sc = iflib_get_softc(ctx); + scctx = sc->vmx_scctx; + + for (i = 0; i < scctx->isc_nrxqsets; i++) { + snprintf(irq_name, sizeof(irq_name), "rxq%d", i); - if (device_is_attached(dev)) { - VMXNET3_CORE_LOCK(sc); - vmxnet3_stop(sc); - VMXNET3_CORE_UNLOCK(sc); - - callout_drain(&sc->vmx_tick); -#ifndef VMXNET3_LEGACY_TX - vmxnet3_drain_taskqueue(sc); -#endif - - ether_ifdetach(ifp); + rxq = &sc->vmx_rxq[i]; + error = iflib_irq_alloc_generic(ctx, &rxq->vxrxq_irq, i + 1, + IFLIB_INTR_RX, vmxnet3_rxq_intr, rxq, i, irq_name); + if (error) { + device_printf(iflib_get_dev(ctx), + "Failed to register rxq %d interrupt handler\n", i); + return (error); + } } - if (sc->vmx_vlan_attach != NULL) { - EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach); - sc->vmx_vlan_attach = NULL; - } - if (sc->vmx_vlan_detach != NULL) { - EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_detach); - sc->vmx_vlan_detach = NULL; + for (i = 0; i < scctx->isc_ntxqsets; i++) { + snprintf(irq_name, sizeof(irq_name), "txq%d", i); + + /* + * Don't provide the corresponding rxq irq for reference - + * we want the transmit task to be attached to a task queue + * that is different from the one used by the corresponding + * rxq irq. 
That is because the TX doorbell writes are very + * expensive as virtualized MMIO operations, so we want to + * be able to defer them to another core when possible so + * that they don't steal receive processing cycles during + * stack turnarounds like TCP ACK generation. The other + * piece to this approach is enabling the iflib abdicate + * option (currently via an interface-specific + * tunable/sysctl). + */ + iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, + irq_name); } -#ifndef VMXNET3_LEGACY_TX - vmxnet3_free_taskqueue(sc); -#endif - vmxnet3_free_interrupts(sc); - - if (ifp != NULL) { - if_free(ifp); - sc->vmx_ifp = NULL; + error = iflib_irq_alloc_generic(ctx, &sc->vmx_event_intr_irq, + scctx->isc_nrxqsets + 1, IFLIB_INTR_ADMIN, vmxnet3_event_intr, sc, 0, + "event"); + if (error) { + device_printf(iflib_get_dev(ctx), + "Failed to register event interrupt handler\n"); + return (error); } - ifmedia_removeall(&sc->vmx_media); + return (0); +} +static void +vmxnet3_free_irqs(struct vmxnet3_softc *sc) +{ + if_softc_ctx_t scctx; + struct vmxnet3_rxqueue *rxq; + int i; + + scctx = sc->vmx_scctx; + + for (i = 0; i < scctx->isc_nrxqsets; i++) { + rxq = &sc->vmx_rxq[i]; + iflib_irq_free(sc->vmx_ctx, &rxq->vxrxq_irq); + } + + iflib_irq_free(sc->vmx_ctx, &sc->vmx_event_intr_irq); +} + +static int +vmxnet3_attach_post(if_ctx_t ctx) +{ + device_t dev; + if_softc_ctx_t scctx; + struct vmxnet3_softc *sc; + int error; + + dev = iflib_get_dev(ctx); + scctx = iflib_get_softc_ctx(ctx); + sc = iflib_get_softc(ctx); + + if (scctx->isc_nrxqsets > 1) + sc->vmx_flags |= VMXNET3_FLAG_RSS; + + error = vmxnet3_alloc_data(sc); + if (error) + goto fail; + + vmxnet3_set_interrupt_idx(sc); + vmxnet3_setup_sysctl(sc); + + ifmedia_add(sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(sc->vmx_media, IFM_ETHER | IFM_AUTO); + +fail: + return (error); +} + +static int +vmxnet3_detach(if_ctx_t ctx) +{ + struct vmxnet3_softc *sc; + + sc = iflib_get_softc(ctx); + + vmxnet3_free_irqs(sc); vmxnet3_free_data(sc); vmxnet3_free_resources(sc); - vmxnet3_free_rxtx_queues(sc); - - VMXNET3_CORE_LOCK_DESTROY(sc); return (0); } static int -vmxnet3_shutdown(device_t dev) +vmxnet3_shutdown(if_ctx_t ctx) +{ + + return (0); +} + +static int +vmxnet3_suspend(if_ctx_t ctx) +{ + + return (0); +} + +static int +vmxnet3_resume(if_ctx_t ctx) { return (0); @@ -445,15 +613,6 @@ vmxnet3_alloc_resources(struct vmxnet3_softc *sc) sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1); sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1); - if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) { - rid = PCIR_BAR(2); - sc->vmx_msix_res = bus_alloc_resource_any(dev, - SYS_RES_MEMORY, &rid, RF_ACTIVE); - } - - if (sc->vmx_msix_res == NULL) - sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX; - return (0); } @@ -476,13 +635,6 @@ vmxnet3_free_resources(struct vmxnet3_softc *sc) bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1); sc->vmx_res1 = NULL; } - - if (sc->vmx_msix_res != NULL) { - rid = PCIR_BAR(2); - bus_release_resource(dev, SYS_RES_MEMORY, rid, - sc->vmx_msix_res); - sc->vmx_msix_res = NULL; - } } static int @@ -511,603 +663,282 @@ vmxnet3_check_version(struct vmxnet3_softc *sc) return (0); } -static int -trunc_powerof2(int val) -{ - - return (1U << (fls(val) - 1)); -} - -static void -vmxnet3_initial_config(struct vmxnet3_softc *sc) -{ - int nqueue, ndesc; - - nqueue = vmxnet3_tunable_int(sc, "txnqueue", vmxnet3_default_txnqueue); - if (nqueue > VMXNET3_MAX_TX_QUEUES || nqueue < 1) - nqueue = VMXNET3_DEF_TX_QUEUES; - if (nqueue > mp_ncpus) - 
nqueue = mp_ncpus; - sc->vmx_max_ntxqueues = trunc_powerof2(nqueue); - - nqueue = vmxnet3_tunable_int(sc, "rxnqueue", vmxnet3_default_rxnqueue); - if (nqueue > VMXNET3_MAX_RX_QUEUES || nqueue < 1) - nqueue = VMXNET3_DEF_RX_QUEUES; - if (nqueue > mp_ncpus) - nqueue = mp_ncpus; - sc->vmx_max_nrxqueues = trunc_powerof2(nqueue); - - if (vmxnet3_tunable_int(sc, "mq_disable", vmxnet3_mq_disable)) { - sc->vmx_max_nrxqueues = 1; - sc->vmx_max_ntxqueues = 1; - } - - ndesc = vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc); - if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC) - ndesc = VMXNET3_DEF_TX_NDESC; - if (ndesc & VMXNET3_MASK_TX_NDESC) - ndesc &= ~VMXNET3_MASK_TX_NDESC; - sc->vmx_ntxdescs = ndesc; - - ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc); - if (ndesc > VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC) - ndesc = VMXNET3_DEF_RX_NDESC; - if (ndesc & VMXNET3_MASK_RX_NDESC) - ndesc &= ~VMXNET3_MASK_RX_NDESC; - sc->vmx_nrxdescs = ndesc; - sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS; -} - -static void -vmxnet3_check_multiqueue(struct vmxnet3_softc *sc) -{ - - if (sc->vmx_intr_type != VMXNET3_IT_MSIX) - goto out; - - /* BMV: Just use the maximum configured for now. */ - sc->vmx_nrxqueues = sc->vmx_max_nrxqueues; - sc->vmx_ntxqueues = sc->vmx_max_ntxqueues; - - if (sc->vmx_nrxqueues > 1) - sc->vmx_flags |= VMXNET3_FLAG_RSS; - - return; - -out: - sc->vmx_ntxqueues = 1; - sc->vmx_nrxqueues = 1; -} - -static int -vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc) -{ - device_t dev; - int nmsix, cnt, required; - - dev = sc->vmx_dev; - - if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) - return (1); - - /* Allocate an additional vector for the events interrupt. */ - required = sc->vmx_max_nrxqueues + sc->vmx_max_ntxqueues + 1; - - nmsix = pci_msix_count(dev); - if (nmsix < required) - return (1); - - cnt = required; - if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) { - sc->vmx_nintrs = required; - return (0); - } else - pci_release_msi(dev); - - /* BMV TODO Fallback to sharing MSIX vectors if possible. 
*/ - - return (1); -} - -static int -vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc) -{ - device_t dev; - int nmsi, cnt, required; - - dev = sc->vmx_dev; - required = 1; - - nmsi = pci_msi_count(dev); - if (nmsi < required) - return (1); - - cnt = required; - if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) { - sc->vmx_nintrs = 1; - return (0); - } else - pci_release_msi(dev); - - return (1); -} - -static int -vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc) -{ - - sc->vmx_nintrs = 1; - return (0); -} - -static int -vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags, - struct vmxnet3_interrupt *intr) -{ - struct resource *irq; - - irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid, flags); - if (irq == NULL) - return (ENXIO); - - intr->vmxi_irq = irq; - intr->vmxi_rid = rid; - - return (0); -} - -static int -vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc) -{ - int i, rid, flags, error; - - rid = 0; - flags = RF_ACTIVE; - - if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) - flags |= RF_SHAREABLE; - else - rid = 1; - - for (i = 0; i < sc->vmx_nintrs; i++, rid++) { - error = vmxnet3_alloc_interrupt(sc, rid, flags, - &sc->vmx_intrs[i]); - if (error) - return (error); - } - - return (0); -} - -static int -vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc) -{ - device_t dev; - struct vmxnet3_txqueue *txq; - struct vmxnet3_rxqueue *rxq; - struct vmxnet3_interrupt *intr; - enum intr_type type; - int i, error; - - dev = sc->vmx_dev; - intr = &sc->vmx_intrs[0]; - type = INTR_TYPE_NET | INTR_MPSAFE; - - for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) { - txq = &sc->vmx_txq[i]; - error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL, - vmxnet3_txq_intr, txq, &intr->vmxi_handler); - if (error) - return (error); - bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, - "tq%d", i); - txq->vxtxq_intr_idx = intr->vmxi_rid - 1; - } - - for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) { - rxq = &sc->vmx_rxq[i]; - error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL, - vmxnet3_rxq_intr, rxq, &intr->vmxi_handler); - if (error) - return (error); - bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, - "rq%d", i); - rxq->vxrxq_intr_idx = intr->vmxi_rid - 1; - } - - error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL, - vmxnet3_event_intr, sc, &intr->vmxi_handler); - if (error) - return (error); - bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, "event"); - sc->vmx_event_intr_idx = intr->vmxi_rid - 1; - - return (0); -} - -static int -vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc) -{ - struct vmxnet3_interrupt *intr; - int i, error; - - intr = &sc->vmx_intrs[0]; - error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq, - INTR_TYPE_NET | INTR_MPSAFE, NULL, vmxnet3_legacy_intr, sc, - &intr->vmxi_handler); - - for (i = 0; i < sc->vmx_ntxqueues; i++) - sc->vmx_txq[i].vxtxq_intr_idx = 0; - for (i = 0; i < sc->vmx_nrxqueues; i++) - sc->vmx_rxq[i].vxrxq_intr_idx = 0; - sc->vmx_event_intr_idx = 0; - - return (error); -} - static void vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc) { + if_softc_ctx_t scctx; struct vmxnet3_txqueue *txq; struct vmxnet3_txq_shared *txs; struct vmxnet3_rxqueue *rxq; struct vmxnet3_rxq_shared *rxs; + int intr_idx; int i; - sc->vmx_ds->evintr = sc->vmx_event_intr_idx; + scctx = sc->vmx_scctx; - for (i = 0; i < sc->vmx_ntxqueues; i++) { - txq = &sc->vmx_txq[i]; - txs = txq->vxtxq_ts; - txs->intr_idx = txq->vxtxq_intr_idx; - } + /* + * There is either one interrupt, or there is one interrupt per + * receive 
queue. If there is one interrupt, then all interrupt + * indexes are zero. If there is one interrupt per receive queue, + * the transmit queue interrupt indexes are assigned the receive + * queue interrupt indexesin round-robin fashion. + * + * The event interrupt is always the last interrupt index. + */ + sc->vmx_event_intr_idx = scctx->isc_vectors - 1; - for (i = 0; i < sc->vmx_nrxqueues; i++) { + intr_idx = 0; + for (i = 0; i < scctx->isc_nrxqsets; i++, intr_idx++) { rxq = &sc->vmx_rxq[i]; rxs = rxq->vxrxq_rs; + rxq->vxrxq_intr_idx = intr_idx; rxs->intr_idx = rxq->vxrxq_intr_idx; } + + /* + * Assign the tx queues interrupt indexes above what we are actually + * using. These interrupts will never be enabled. + */ + intr_idx = scctx->isc_vectors; + for (i = 0; i < scctx->isc_ntxqsets; i++, intr_idx++) { + txq = &sc->vmx_txq[i]; + txs = txq->vxtxq_ts; + txq->vxtxq_intr_idx = intr_idx; + txs->intr_idx = txq->vxtxq_intr_idx; + } } static int -vmxnet3_setup_interrupts(struct vmxnet3_softc *sc) +vmxnet3_queues_shared_alloc(struct vmxnet3_softc *sc) { + if_softc_ctx_t scctx; + int size; int error; + + scctx = sc->vmx_scctx; - error = vmxnet3_alloc_intr_resources(sc); - if (error) + /* + * The txq and rxq shared data areas must be allocated contiguously + * as vmxnet3_driver_shared contains only a single address member + * for the shared queue data area. + */ + size = scctx->isc_ntxqsets * sizeof(struct vmxnet3_txq_shared) + + scctx->isc_nrxqsets * sizeof(struct vmxnet3_rxq_shared); + error = iflib_dma_alloc_align(sc->vmx_ctx, size, 128, &sc->vmx_qs_dma, 0); + if (error) { + device_printf(sc->vmx_dev, "cannot alloc queue shared memory\n"); return (error); - - switch (sc->vmx_intr_type) { - case VMXNET3_IT_MSIX: - error = vmxnet3_setup_msix_interrupts(sc); - break; - case VMXNET3_IT_MSI: - case VMXNET3_IT_LEGACY: - error = vmxnet3_setup_legacy_interrupt(sc); - break; - default: - panic("%s: invalid interrupt type %d", __func__, - sc->vmx_intr_type); } - if (error == 0) - vmxnet3_set_interrupt_idx(sc); - - return (error); -} - -static int -vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc) -{ - device_t dev; - uint32_t config; - int error; - - dev = sc->vmx_dev; - config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG); - - sc->vmx_intr_type = config & 0x03; - sc->vmx_intr_mask_mode = (config >> 2) & 0x03; - - switch (sc->vmx_intr_type) { - case VMXNET3_IT_AUTO: - sc->vmx_intr_type = VMXNET3_IT_MSIX; - /* FALLTHROUGH */ - case VMXNET3_IT_MSIX: - error = vmxnet3_alloc_msix_interrupts(sc); - if (error == 0) - break; - sc->vmx_intr_type = VMXNET3_IT_MSI; - /* FALLTHROUGH */ - case VMXNET3_IT_MSI: - error = vmxnet3_alloc_msi_interrupts(sc); - if (error == 0) - break; - sc->vmx_intr_type = VMXNET3_IT_LEGACY; - /* FALLTHROUGH */ - case VMXNET3_IT_LEGACY: - error = vmxnet3_alloc_legacy_interrupts(sc); - if (error == 0) - break; - /* FALLTHROUGH */ - default: - sc->vmx_intr_type = -1; - device_printf(dev, "cannot allocate any interrupt resources\n"); - return (ENXIO); - } - - return (error); -} - -static void -vmxnet3_free_interrupt(struct vmxnet3_softc *sc, - struct vmxnet3_interrupt *intr) -{ - device_t dev; - - dev = sc->vmx_dev; - - if (intr->vmxi_handler != NULL) { - bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler); - intr->vmxi_handler = NULL; - } - - if (intr->vmxi_irq != NULL) { - bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid, - intr->vmxi_irq); - intr->vmxi_irq = NULL; - intr->vmxi_rid = -1; - } -} - -static void -vmxnet3_free_interrupts(struct vmxnet3_softc *sc) -{ - int i; - - 
for (i = 0; i < sc->vmx_nintrs; i++) - vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]); - - if (sc->vmx_intr_type == VMXNET3_IT_MSI || - sc->vmx_intr_type == VMXNET3_IT_MSIX) - pci_release_msi(sc->vmx_dev); -} - -#ifndef VMXNET3_LEGACY_TX -static int -vmxnet3_alloc_taskqueue(struct vmxnet3_softc *sc) -{ - device_t dev; - - dev = sc->vmx_dev; - - sc->vmx_tq = taskqueue_create(device_get_nameunit(dev), M_NOWAIT, - taskqueue_thread_enqueue, &sc->vmx_tq); - if (sc->vmx_tq == NULL) - return (ENOMEM); - return (0); } static void -vmxnet3_start_taskqueue(struct vmxnet3_softc *sc) -{ - device_t dev; - int nthreads, error; - - dev = sc->vmx_dev; - - /* - * The taskqueue is typically not frequently used, so a dedicated - * thread for each queue is unnecessary. - */ - nthreads = MAX(1, sc->vmx_ntxqueues / 2); - - /* - * Most drivers just ignore the return value - it only fails - * with ENOMEM so an error is not likely. It is hard for us - * to recover from an error here. - */ - error = taskqueue_start_threads(&sc->vmx_tq, nthreads, PI_NET, - "%s taskq", device_get_nameunit(dev)); - if (error) - device_printf(dev, "failed to start taskqueue: %d", error); -} - -static void -vmxnet3_drain_taskqueue(struct vmxnet3_softc *sc) +vmxnet3_init_txq(struct vmxnet3_softc *sc, int q) { struct vmxnet3_txqueue *txq; - int i; + struct vmxnet3_comp_ring *txc; + struct vmxnet3_txring *txr; + if_softc_ctx_t scctx; + + txq = &sc->vmx_txq[q]; + txc = &txq->vxtxq_comp_ring; + txr = &txq->vxtxq_cmd_ring; + scctx = sc->vmx_scctx; - if (sc->vmx_tq != NULL) { - for (i = 0; i < sc->vmx_max_ntxqueues; i++) { - txq = &sc->vmx_txq[i]; - taskqueue_drain(sc->vmx_tq, &txq->vxtxq_defrtask); - } + snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d", + device_get_nameunit(sc->vmx_dev), q); + + txq->vxtxq_sc = sc; + txq->vxtxq_id = q; + txc->vxcr_ndesc = scctx->isc_ntxd[0]; + txr->vxtxr_ndesc = scctx->isc_ntxd[1]; +} + +static int +vmxnet3_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, + int ntxqs, int ntxqsets) +{ + struct vmxnet3_softc *sc; + int q; + int error; + caddr_t kva; + + sc = iflib_get_softc(ctx); + + /* Allocate the array of transmit queues */ + sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) * + ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); + if (sc->vmx_txq == NULL) + return (ENOMEM); + + /* Initialize driver state for each transmit queue */ + for (q = 0; q < ntxqsets; q++) + vmxnet3_init_txq(sc, q); + + /* + * Allocate queue state that is shared with the device. This check + * and call is performed in both vmxnet3_tx_queues_alloc() and + * vmxnet3_rx_queues_alloc() so that we don't have to care which + * order iflib invokes those routines in. 
+ */ + if (sc->vmx_qs_dma.idi_size == 0) { + error = vmxnet3_queues_shared_alloc(sc); + if (error) + return (error); } + + kva = sc->vmx_qs_dma.idi_vaddr; + for (q = 0; q < ntxqsets; q++) { + sc->vmx_txq[q].vxtxq_ts = (struct vmxnet3_txq_shared *) kva; + kva += sizeof(struct vmxnet3_txq_shared); + } + + /* Record descriptor ring vaddrs and paddrs */ + for (q = 0; q < ntxqsets; q++) { + struct vmxnet3_txqueue *txq; + struct vmxnet3_txring *txr; + struct vmxnet3_comp_ring *txc; + + txq = &sc->vmx_txq[q]; + txc = &txq->vxtxq_comp_ring; + txr = &txq->vxtxq_cmd_ring; + + /* Completion ring */ + txc->vxcr_u.txcd = + (struct vmxnet3_txcompdesc *) vaddrs[q * ntxqs + 0]; + txc->vxcr_paddr = paddrs[q * ntxqs + 0]; + + /* Command ring */ + txr->vxtxr_txd = + (struct vmxnet3_txdesc *) vaddrs[q * ntxqs + 1]; + txr->vxtxr_paddr = paddrs[q * ntxqs + 1]; + } + + return (0); } static void -vmxnet3_free_taskqueue(struct vmxnet3_softc *sc) -{ - if (sc->vmx_tq != NULL) { - taskqueue_free(sc->vmx_tq); - sc->vmx_tq = NULL; - } -} -#endif - -static int -vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q) +vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q, int nrxqs) { struct vmxnet3_rxqueue *rxq; + struct vmxnet3_comp_ring *rxc; struct vmxnet3_rxring *rxr; + if_softc_ctx_t scctx; int i; rxq = &sc->vmx_rxq[q]; + rxc = &rxq->vxrxq_comp_ring; + scctx = sc->vmx_scctx; snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d", device_get_nameunit(sc->vmx_dev), q); - mtx_init(&rxq->vxrxq_mtx, rxq->vxrxq_name, NULL, MTX_DEF); rxq->vxrxq_sc = sc; rxq->vxrxq_id = q; - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { + /* + * First rxq is the completion queue, so there are nrxqs - 1 command + * rings starting at iflib queue id 1. + */ + rxc->vxcr_ndesc = scctx->isc_nrxd[0]; + for (i = 0; i < nrxqs - 1; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; - rxr->vxrxr_rid = i; - rxr->vxrxr_ndesc = sc->vmx_nrxdescs; - rxr->vxrxr_rxbuf = malloc(rxr->vxrxr_ndesc * - sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO); - if (rxr->vxrxr_rxbuf == NULL) - return (ENOMEM); - - rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs; + rxr->vxrxr_ndesc = scctx->isc_nrxd[i + 1]; } - - return (0); } static int -vmxnet3_init_txq(struct vmxnet3_softc *sc, int q) +vmxnet3_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, + int nrxqs, int nrxqsets) { - struct vmxnet3_txqueue *txq; - struct vmxnet3_txring *txr; + struct vmxnet3_softc *sc; + if_softc_ctx_t scctx; + int q; + int i; + int error; + caddr_t kva; + + sc = iflib_get_softc(ctx); + scctx = sc->vmx_scctx; - txq = &sc->vmx_txq[q]; - txr = &txq->vxtxq_cmd_ring; - - snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d", - device_get_nameunit(sc->vmx_dev), q); - mtx_init(&txq->vxtxq_mtx, txq->vxtxq_name, NULL, MTX_DEF); - - txq->vxtxq_sc = sc; - txq->vxtxq_id = q; - - txr->vxtxr_ndesc = sc->vmx_ntxdescs; - txr->vxtxr_txbuf = malloc(txr->vxtxr_ndesc * - sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_NOWAIT | M_ZERO); - if (txr->vxtxr_txbuf == NULL) + /* Allocate the array of receive queues */ + sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) * + nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); + if (sc->vmx_rxq == NULL) return (ENOMEM); - txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs; - -#ifndef VMXNET3_LEGACY_TX - TASK_INIT(&txq->vxtxq_defrtask, 0, vmxnet3_txq_tq_deferred, txq); - - txq->vxtxq_br = buf_ring_alloc(VMXNET3_DEF_BUFRING_SIZE, M_DEVBUF, - M_NOWAIT, &txq->vxtxq_mtx); - if (txq->vxtxq_br == NULL) - return (ENOMEM); -#endif - - return (0); -} - -static int 
-vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc) -{ - int i, error; + /* Initialize driver state for each receive queue */ + for (q = 0; q < nrxqsets; q++) + vmxnet3_init_rxq(sc, q, nrxqs); /* - * Only attempt to create multiple queues if MSIX is available. MSIX is - * disabled by default because its apparently broken for devices passed - * through by at least ESXi 5.1. The hw.pci.honor_msi_blacklist tunable - * must be set to zero for MSIX. This check prevents us from allocating - * queue structures that we will not use. + * Allocate queue state that is shared with the device. This check + * and call is performed in both vmxnet3_tx_queues_alloc() and + * vmxnet3_rx_queues_alloc() so that we don't have to care which + * order iflib invokes those routines in. */ - if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) { - sc->vmx_max_nrxqueues = 1; - sc->vmx_max_ntxqueues = 1; - } - - sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) * - sc->vmx_max_nrxqueues, M_DEVBUF, M_NOWAIT | M_ZERO); - sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) * - sc->vmx_max_ntxqueues, M_DEVBUF, M_NOWAIT | M_ZERO); - if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL) - return (ENOMEM); - - for (i = 0; i < sc->vmx_max_nrxqueues; i++) { - error = vmxnet3_init_rxq(sc, i); + if (sc->vmx_qs_dma.idi_size == 0) { + error = vmxnet3_queues_shared_alloc(sc); if (error) return (error); } - for (i = 0; i < sc->vmx_max_ntxqueues; i++) { - error = vmxnet3_init_txq(sc, i); - if (error) - return (error); + kva = sc->vmx_qs_dma.idi_vaddr + + scctx->isc_ntxqsets * sizeof(struct vmxnet3_txq_shared); + for (q = 0; q < nrxqsets; q++) { + sc->vmx_rxq[q].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva; + kva += sizeof(struct vmxnet3_rxq_shared); } - return (0); -} + /* Record descriptor ring vaddrs and paddrs */ + for (q = 0; q < nrxqsets; q++) { + struct vmxnet3_rxqueue *rxq; + struct vmxnet3_rxring *rxr; + struct vmxnet3_comp_ring *rxc; -static void -vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq) -{ - struct vmxnet3_rxring *rxr; - int i; + rxq = &sc->vmx_rxq[q]; + rxc = &rxq->vxrxq_comp_ring; - rxq->vxrxq_sc = NULL; - rxq->vxrxq_id = -1; + /* Completion ring */ + rxc->vxcr_u.rxcd = + (struct vmxnet3_rxcompdesc *) vaddrs[q * nrxqs + 0]; + rxc->vxcr_paddr = paddrs[q * nrxqs + 0]; - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { - rxr = &rxq->vxrxq_cmd_ring[i]; + /* Command ring(s) */ + for (i = 0; i < nrxqs - 1; i++) { + rxr = &rxq->vxrxq_cmd_ring[i]; - if (rxr->vxrxr_rxbuf != NULL) { - free(rxr->vxrxr_rxbuf, M_DEVBUF); - rxr->vxrxr_rxbuf = NULL; + rxr->vxrxr_rxd = + (struct vmxnet3_rxdesc *) vaddrs[q * nrxqs + 1 + i]; + rxr->vxrxr_paddr = paddrs[q * nrxqs + 1 + i]; } } - if (mtx_initialized(&rxq->vxrxq_mtx) != 0) - mtx_destroy(&rxq->vxrxq_mtx); + return (0); } static void -vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq) +vmxnet3_queues_free(if_ctx_t ctx) { - struct vmxnet3_txring *txr; + struct vmxnet3_softc *sc; - txr = &txq->vxtxq_cmd_ring; + sc = iflib_get_softc(ctx); - txq->vxtxq_sc = NULL; - txq->vxtxq_id = -1; - -#ifndef VMXNET3_LEGACY_TX - if (txq->vxtxq_br != NULL) { - buf_ring_free(txq->vxtxq_br, M_DEVBUF); - txq->vxtxq_br = NULL; - } -#endif - - if (txr->vxtxr_txbuf != NULL) { - free(txr->vxtxr_txbuf, M_DEVBUF); - txr->vxtxr_txbuf = NULL; + /* Free queue state area that is shared with the device */ + if (sc->vmx_qs_dma.idi_size != 0) { + iflib_dma_free(&sc->vmx_qs_dma); + sc->vmx_qs_dma.idi_size = 0; } - if (mtx_initialized(&txq->vxtxq_mtx) != 0) - mtx_destroy(&txq->vxtxq_mtx); -} - -static void -vmxnet3_free_rxtx_queues(struct 
vmxnet3_softc *sc) -{ - int i; - + /* Free array of receive queues */ if (sc->vmx_rxq != NULL) { - for (i = 0; i < sc->vmx_max_nrxqueues; i++) - vmxnet3_destroy_rxq(&sc->vmx_rxq[i]); free(sc->vmx_rxq, M_DEVBUF); sc->vmx_rxq = NULL; } + /* Free array of transmit queues */ if (sc->vmx_txq != NULL) { - for (i = 0; i < sc->vmx_max_ntxqueues; i++) - vmxnet3_destroy_txq(&sc->vmx_txq[i]); free(sc->vmx_txq, M_DEVBUF); sc->vmx_txq = NULL; } @@ -1117,48 +948,31 @@ static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc) { device_t dev; - uint8_t *kva; size_t size; - int i, error; + int error; dev = sc->vmx_dev; + /* Top level state structure shared with the device */ size = sizeof(struct vmxnet3_driver_shared); - error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma); + error = iflib_dma_alloc_align(sc->vmx_ctx, size, 1, &sc->vmx_ds_dma, 0); if (error) { device_printf(dev, "cannot alloc shared memory\n"); return (error); } - sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr; - - size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) + - sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared); - error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma); - if (error) { - device_printf(dev, "cannot alloc queue shared memory\n"); - return (error); - } - sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr; - kva = sc->vmx_qs; - - for (i = 0; i < sc->vmx_ntxqueues; i++) { - sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva; - kva += sizeof(struct vmxnet3_txq_shared); - } - for (i = 0; i < sc->vmx_nrxqueues; i++) { - sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva; - kva += sizeof(struct vmxnet3_rxq_shared); - } + sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.idi_vaddr; + /* RSS table state shared with the device */ if (sc->vmx_flags & VMXNET3_FLAG_RSS) { size = sizeof(struct vmxnet3_rss_shared); - error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma); + error = iflib_dma_alloc_align(sc->vmx_ctx, size, 128, + &sc->vmx_rss_dma, 0); if (error) { device_printf(dev, "cannot alloc rss shared memory\n"); return (error); } sc->vmx_rss = - (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr; + (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.idi_vaddr; } return (0); @@ -1168,321 +982,31 @@ static void vmxnet3_free_shared_data(struct vmxnet3_softc *sc) { + /* Free RSS table state shared with the device */ if (sc->vmx_rss != NULL) { - vmxnet3_dma_free(sc, &sc->vmx_rss_dma); + iflib_dma_free(&sc->vmx_rss_dma); sc->vmx_rss = NULL; } - if (sc->vmx_qs != NULL) { - vmxnet3_dma_free(sc, &sc->vmx_qs_dma); - sc->vmx_qs = NULL; - } - + /* Free top level state structure shared with the device */ if (sc->vmx_ds != NULL) { - vmxnet3_dma_free(sc, &sc->vmx_ds_dma); + iflib_dma_free(&sc->vmx_ds_dma); sc->vmx_ds = NULL; } } -static int -vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc) -{ - device_t dev; - struct vmxnet3_txqueue *txq; - struct vmxnet3_txring *txr; - struct vmxnet3_comp_ring *txc; - size_t descsz, compsz; - int i, q, error; - - dev = sc->vmx_dev; - - for (q = 0; q < sc->vmx_ntxqueues; q++) { - txq = &sc->vmx_txq[q]; - txr = &txq->vxtxq_cmd_ring; - txc = &txq->vxtxq_comp_ring; - - descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc); - compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc); - - error = bus_dma_tag_create(bus_get_dma_tag(dev), - 1, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - VMXNET3_TX_MAXSIZE, /* maxsize */ - 
VMXNET3_TX_MAXSEGS, /* nsegments */ - VMXNET3_TX_MAXSEGSIZE, /* maxsegsize */ - 0, /* flags */ - NULL, NULL, /* lockfunc, lockarg */ - &txr->vxtxr_txtag); - if (error) { - device_printf(dev, - "unable to create Tx buffer tag for queue %d\n", q); - return (error); - } - - error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma); - if (error) { - device_printf(dev, "cannot alloc Tx descriptors for " - "queue %d error %d\n", q, error); - return (error); - } - txr->vxtxr_txd = - (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr; - - error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma); - if (error) { - device_printf(dev, "cannot alloc Tx comp descriptors " - "for queue %d error %d\n", q, error); - return (error); - } - txc->vxcr_u.txcd = - (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr; - - for (i = 0; i < txr->vxtxr_ndesc; i++) { - error = bus_dmamap_create(txr->vxtxr_txtag, 0, - &txr->vxtxr_txbuf[i].vtxb_dmamap); - if (error) { - device_printf(dev, "unable to create Tx buf " - "dmamap for queue %d idx %d\n", q, i); - return (error); - } - } - } - - return (0); -} - -static void -vmxnet3_free_txq_data(struct vmxnet3_softc *sc) -{ - device_t dev; - struct vmxnet3_txqueue *txq; - struct vmxnet3_txring *txr; - struct vmxnet3_comp_ring *txc; - struct vmxnet3_txbuf *txb; - int i, q; - - dev = sc->vmx_dev; - - for (q = 0; q < sc->vmx_ntxqueues; q++) { - txq = &sc->vmx_txq[q]; - txr = &txq->vxtxq_cmd_ring; - txc = &txq->vxtxq_comp_ring; - - for (i = 0; i < txr->vxtxr_ndesc; i++) { - txb = &txr->vxtxr_txbuf[i]; - if (txb->vtxb_dmamap != NULL) { - bus_dmamap_destroy(txr->vxtxr_txtag, - txb->vtxb_dmamap); - txb->vtxb_dmamap = NULL; - } - } - - if (txc->vxcr_u.txcd != NULL) { - vmxnet3_dma_free(sc, &txc->vxcr_dma); - txc->vxcr_u.txcd = NULL; - } - - if (txr->vxtxr_txd != NULL) { - vmxnet3_dma_free(sc, &txr->vxtxr_dma); - txr->vxtxr_txd = NULL; - } - - if (txr->vxtxr_txtag != NULL) { - bus_dma_tag_destroy(txr->vxtxr_txtag); - txr->vxtxr_txtag = NULL; - } - } -} - -static int -vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc) -{ - device_t dev; - struct vmxnet3_rxqueue *rxq; - struct vmxnet3_rxring *rxr; - struct vmxnet3_comp_ring *rxc; - int descsz, compsz; - int i, j, q, error; - - dev = sc->vmx_dev; - - for (q = 0; q < sc->vmx_nrxqueues; q++) { - rxq = &sc->vmx_rxq[q]; - rxc = &rxq->vxrxq_comp_ring; - compsz = 0; - - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { - rxr = &rxq->vxrxq_cmd_ring[i]; - - descsz = rxr->vxrxr_ndesc * - sizeof(struct vmxnet3_rxdesc); - compsz += rxr->vxrxr_ndesc * - sizeof(struct vmxnet3_rxcompdesc); - - error = bus_dma_tag_create(bus_get_dma_tag(dev), - 1, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - MJUMPAGESIZE, /* maxsize */ - 1, /* nsegments */ - MJUMPAGESIZE, /* maxsegsize */ - 0, /* flags */ - NULL, NULL, /* lockfunc, lockarg */ - &rxr->vxrxr_rxtag); - if (error) { - device_printf(dev, - "unable to create Rx buffer tag for " - "queue %d\n", q); - return (error); - } - - error = vmxnet3_dma_malloc(sc, descsz, 512, - &rxr->vxrxr_dma); - if (error) { - device_printf(dev, "cannot allocate Rx " - "descriptors for queue %d/%d error %d\n", - i, q, error); - return (error); - } - rxr->vxrxr_rxd = - (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr; - } - - error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma); - if (error) { - device_printf(dev, "cannot alloc Rx comp descriptors " - "for queue %d error %d\n", q, error); - return (error); - } - rxc->vxcr_u.rxcd = 
- (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr; - - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { - rxr = &rxq->vxrxq_cmd_ring[i]; - - error = bus_dmamap_create(rxr->vxrxr_rxtag, 0, - &rxr->vxrxr_spare_dmap); - if (error) { - device_printf(dev, "unable to create spare " - "dmamap for queue %d/%d error %d\n", - q, i, error); - return (error); - } - - for (j = 0; j < rxr->vxrxr_ndesc; j++) { - error = bus_dmamap_create(rxr->vxrxr_rxtag, 0, - &rxr->vxrxr_rxbuf[j].vrxb_dmamap); - if (error) { - device_printf(dev, "unable to create " - "dmamap for queue %d/%d slot %d " - "error %d\n", - q, i, j, error); - return (error); - } - } - } - } - - return (0); -} - -static void -vmxnet3_free_rxq_data(struct vmxnet3_softc *sc) -{ - device_t dev; - struct vmxnet3_rxqueue *rxq; - struct vmxnet3_rxring *rxr; - struct vmxnet3_comp_ring *rxc; - struct vmxnet3_rxbuf *rxb; - int i, j, q; - - dev = sc->vmx_dev; - - for (q = 0; q < sc->vmx_nrxqueues; q++) { - rxq = &sc->vmx_rxq[q]; - rxc = &rxq->vxrxq_comp_ring; - - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { - rxr = &rxq->vxrxq_cmd_ring[i]; - - if (rxr->vxrxr_spare_dmap != NULL) { - bus_dmamap_destroy(rxr->vxrxr_rxtag, - rxr->vxrxr_spare_dmap); - rxr->vxrxr_spare_dmap = NULL; - } - - for (j = 0; j < rxr->vxrxr_ndesc; j++) { - rxb = &rxr->vxrxr_rxbuf[j]; - if (rxb->vrxb_dmamap != NULL) { - bus_dmamap_destroy(rxr->vxrxr_rxtag, - rxb->vrxb_dmamap); - rxb->vrxb_dmamap = NULL; - } - } - } - - if (rxc->vxcr_u.rxcd != NULL) { - vmxnet3_dma_free(sc, &rxc->vxcr_dma); - rxc->vxcr_u.rxcd = NULL; - } - - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { - rxr = &rxq->vxrxq_cmd_ring[i]; - - if (rxr->vxrxr_rxd != NULL) { - vmxnet3_dma_free(sc, &rxr->vxrxr_dma); - rxr->vxrxr_rxd = NULL; - } - - if (rxr->vxrxr_rxtag != NULL) { - bus_dma_tag_destroy(rxr->vxrxr_rxtag); - rxr->vxrxr_rxtag = NULL; - } - } - } -} - -static int -vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc) -{ - int error; - - error = vmxnet3_alloc_txq_data(sc); - if (error) - return (error); - - error = vmxnet3_alloc_rxq_data(sc); - if (error) - return (error); - - return (0); -} - -static void -vmxnet3_free_queue_data(struct vmxnet3_softc *sc) -{ - - if (sc->vmx_rxq != NULL) - vmxnet3_free_rxq_data(sc); - - if (sc->vmx_txq != NULL) - vmxnet3_free_txq_data(sc); -} - static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc) { int error; - error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN, - 32, &sc->vmx_mcast_dma); + /* Multicast table state shared with the device */ + error = iflib_dma_alloc_align(sc->vmx_ctx, + VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN, 32, &sc->vmx_mcast_dma, 0); if (error) device_printf(sc->vmx_dev, "unable to alloc multicast table\n"); else - sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr; + sc->vmx_mcast = sc->vmx_mcast_dma.idi_vaddr; return (error); } @@ -1491,8 +1015,9 @@ static void vmxnet3_free_mcast_table(struct vmxnet3_softc *sc) { + /* Free multicast table state shared with the device */ if (sc->vmx_mcast != NULL) { - vmxnet3_dma_free(sc, &sc->vmx_mcast_dma); + iflib_dma_free(&sc->vmx_mcast_dma); sc->vmx_mcast = NULL; } } @@ -1501,6 +1026,8 @@ static void vmxnet3_init_shared_data(struct vmxnet3_softc *sc) { struct vmxnet3_driver_shared *ds; + if_shared_ctx_t sctx; + if_softc_ctx_t scctx; struct vmxnet3_txqueue *txq; struct vmxnet3_txq_shared *txs; struct vmxnet3_rxqueue *rxq; @@ -1508,6 +1035,8 @@ vmxnet3_init_shared_data(struct vmxnet3_softc *sc) int i; ds = sc->vmx_ds; + sctx = sc->vmx_sctx; + scctx = sc->vmx_scctx; /* * Initialize fields of the shared data 
that remains the same across @@ -1530,91 +1059,68 @@ vmxnet3_init_shared_data(struct vmxnet3_softc *sc) /* Misc. conf */ ds->driver_data = vtophys(sc); ds->driver_data_len = sizeof(struct vmxnet3_softc); - ds->queue_shared = sc->vmx_qs_dma.dma_paddr; - ds->queue_shared_len = sc->vmx_qs_dma.dma_size; - ds->nrxsg_max = sc->vmx_max_rxsegs; + ds->queue_shared = sc->vmx_qs_dma.idi_paddr; + ds->queue_shared_len = sc->vmx_qs_dma.idi_size; + ds->nrxsg_max = IFLIB_MAX_RX_SEGS; /* RSS conf */ if (sc->vmx_flags & VMXNET3_FLAG_RSS) { ds->rss.version = 1; - ds->rss.paddr = sc->vmx_rss_dma.dma_paddr; - ds->rss.len = sc->vmx_rss_dma.dma_size; + ds->rss.paddr = sc->vmx_rss_dma.idi_paddr; + ds->rss.len = sc->vmx_rss_dma.idi_size; } /* Interrupt control. */ ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO; - ds->nintr = sc->vmx_nintrs; + /* + * Total number of interrupt indexes we are using in the shared + * config data, even though we don't actually allocate MSIX + * resources for the tx queues. Some versions of the device will + * fail to initialize successfully if interrupt indexes are used in + * the shared config that exceed the number of interrupts configured + * here. + */ + ds->nintr = (scctx->isc_vectors == 1) ? + 1 : (scctx->isc_nrxqsets + scctx->isc_ntxqsets + 1); ds->evintr = sc->vmx_event_intr_idx; ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL; - for (i = 0; i < sc->vmx_nintrs; i++) + for (i = 0; i < ds->nintr; i++) ds->modlevel[i] = UPT1_IMOD_ADAPTIVE; /* Receive filter. */ - ds->mcast_table = sc->vmx_mcast_dma.dma_paddr; - ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size; + ds->mcast_table = sc->vmx_mcast_dma.idi_paddr; + ds->mcast_tablelen = sc->vmx_mcast_dma.idi_size; /* Tx queues */ - for (i = 0; i < sc->vmx_ntxqueues; i++) { + for (i = 0; i < scctx->isc_ntxqsets; i++) { txq = &sc->vmx_txq[i]; txs = txq->vxtxq_ts; - txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr; + txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_paddr; txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc; - txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr; + txs->comp_ring = txq->vxtxq_comp_ring.vxcr_paddr; txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc; txs->driver_data = vtophys(txq); txs->driver_data_len = sizeof(struct vmxnet3_txqueue); } /* Rx queues */ - for (i = 0; i < sc->vmx_nrxqueues; i++) { + for (i = 0; i < scctx->isc_nrxqsets; i++) { rxq = &sc->vmx_rxq[i]; rxs = rxq->vxrxq_rs; - rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr; + rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_paddr; rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc; - rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr; + rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_paddr; rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc; - rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr; + rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_paddr; rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc; rxs->driver_data = vtophys(rxq); rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue); } } -static void -vmxnet3_init_hwassist(struct vmxnet3_softc *sc) -{ - struct ifnet *ifp = sc->vmx_ifp; - uint64_t hwassist; - - hwassist = 0; - if (ifp->if_capenable & IFCAP_TXCSUM) - hwassist |= VMXNET3_CSUM_OFFLOAD; - if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) - hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6; - if (ifp->if_capenable & IFCAP_TSO4) - hwassist |= CSUM_IP_TSO; - if (ifp->if_capenable & IFCAP_TSO6) - hwassist |= CSUM_IP6_TSO; - ifp->if_hwassist = hwassist; -} - -static void -vmxnet3_reinit_interface(struct 
vmxnet3_softc *sc) -{ - struct ifnet *ifp; - - ifp = sc->vmx_ifp; - - /* Use the current MAC address. */ - bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN); - vmxnet3_set_lladdr(sc); - - vmxnet3_init_hwassist(sc); -} - static void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc) { @@ -1631,10 +1137,12 @@ vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc) }; struct vmxnet3_driver_shared *ds; + if_softc_ctx_t scctx; struct vmxnet3_rss_shared *rss; int i; - + ds = sc->vmx_ds; + scctx = sc->vmx_scctx; rss = sc->vmx_rss; rss->hash_type = @@ -1646,7 +1154,7 @@ vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc) memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE); for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++) - rss->ind_table[i] = i % sc->vmx_nrxqueues; + rss->ind_table[i] = i % scctx->isc_nrxqsets; } static void @@ -1654,13 +1162,15 @@ vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc) { struct ifnet *ifp; struct vmxnet3_driver_shared *ds; - + if_softc_ctx_t scctx; + ifp = sc->vmx_ifp; ds = sc->vmx_ds; - + scctx = sc->vmx_scctx; + ds->mtu = ifp->if_mtu; - ds->ntxqueue = sc->vmx_ntxqueues; - ds->nrxqueue = sc->vmx_nrxqueues; + ds->ntxqueue = scctx->isc_ntxqsets; + ds->nrxqueue = scctx->isc_nrxqsets; ds->upt_features = 0; if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) @@ -1675,9 +1185,9 @@ vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc) vmxnet3_reinit_rss_shared_data(sc); } - vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr); + vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.idi_paddr); vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH, - (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32); + (uint64_t) sc->vmx_ds_dma.idi_paddr >> 32); } static int @@ -1689,10 +1199,6 @@ vmxnet3_alloc_data(struct vmxnet3_softc *sc) if (error) return (error); - error = vmxnet3_alloc_queue_data(sc); - if (error) - return (error); - error = vmxnet3_alloc_mcast_table(sc); if (error) return (error); @@ -1707,105 +1213,27 @@ vmxnet3_free_data(struct vmxnet3_softc *sc) { vmxnet3_free_mcast_table(sc); - vmxnet3_free_queue_data(sc); vmxnet3_free_shared_data(sc); } -static int -vmxnet3_setup_interface(struct vmxnet3_softc *sc) -{ - device_t dev; - struct ifnet *ifp; - - dev = sc->vmx_dev; - - ifp = sc->vmx_ifp = if_alloc(IFT_ETHER); - if (ifp == NULL) { - device_printf(dev, "cannot allocate ifnet structure\n"); - return (ENOSPC); - } - - if_initname(ifp, device_get_name(dev), device_get_unit(dev)); -#if __FreeBSD_version < 1000025 - ifp->if_baudrate = 1000000000; -#elif __FreeBSD_version < 1100011 - if_initbaudrate(ifp, IF_Gbps(10)); -#else - ifp->if_baudrate = IF_Gbps(10); -#endif - ifp->if_softc = sc; - ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; - ifp->if_init = vmxnet3_init; - ifp->if_ioctl = vmxnet3_ioctl; - ifp->if_get_counter = vmxnet3_get_counter; - ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); - ifp->if_hw_tsomaxsegcount = VMXNET3_TX_MAXSEGS; - ifp->if_hw_tsomaxsegsize = VMXNET3_TX_MAXSEGSIZE; - -#ifdef VMXNET3_LEGACY_TX - ifp->if_start = vmxnet3_start; - ifp->if_snd.ifq_drv_maxlen = sc->vmx_ntxdescs - 1; - IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs - 1); - IFQ_SET_READY(&ifp->if_snd); -#else - ifp->if_transmit = vmxnet3_txq_mq_start; - ifp->if_qflush = vmxnet3_qflush; -#endif - - vmxnet3_get_lladdr(sc); - ether_ifattach(ifp, sc->vmx_lladdr); - - ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM; - ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6; - ifp->if_capabilities |= IFCAP_TSO4 | 
IFCAP_TSO6; - ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | - IFCAP_VLAN_HWCSUM; - ifp->if_capenable = ifp->if_capabilities; - - /* These capabilities are not enabled by default. */ - ifp->if_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER; - - sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, - vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST); - sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_config, - vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); - - ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change, - vmxnet3_media_status); - ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL); - ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO); - - return (0); -} - static void vmxnet3_evintr(struct vmxnet3_softc *sc) { device_t dev; - struct ifnet *ifp; struct vmxnet3_txq_shared *ts; struct vmxnet3_rxq_shared *rs; uint32_t event; - int reset; dev = sc->vmx_dev; - ifp = sc->vmx_ifp; - reset = 0; - - VMXNET3_CORE_LOCK(sc); /* Clear events. */ event = sc->vmx_ds->event; vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event); - if (event & VMXNET3_EVENT_LINK) { + if (event & VMXNET3_EVENT_LINK) vmxnet3_link_status(sc); - if (sc->vmx_link_active != 0) - vmxnet3_tx_start_all(sc); - } if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) { - reset = 1; vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS); ts = sc->vmx_txq[0].vxtxq_ts; if (ts->stopped != 0) @@ -1813,45 +1241,155 @@ vmxnet3_evintr(struct vmxnet3_softc *sc) rs = sc->vmx_rxq[0].vxrxq_rs; if (rs->stopped != 0) device_printf(dev, "Rx queue error %#x\n", rs->error); - device_printf(dev, "Rx/Tx queue error event ... resetting\n"); + + /* XXX - rely on iflib watchdog to reset us? */ + device_printf(dev, "Rx/Tx queue error event ... " + "waiting for iflib watchdog reset\n"); } if (event & VMXNET3_EVENT_DIC) device_printf(dev, "device implementation change event\n"); if (event & VMXNET3_EVENT_DEBUG) device_printf(dev, "debug event\n"); +} - if (reset != 0) { - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - vmxnet3_init_locked(sc); +static int +vmxnet3_isc_txd_encap(void *vsc, if_pkt_info_t pi) +{ + struct vmxnet3_softc *sc; + struct vmxnet3_txqueue *txq; + struct vmxnet3_txring *txr; + struct vmxnet3_txdesc *txd, *sop; + bus_dma_segment_t *segs; + int nsegs; + int pidx; + int hdrlen; + int i; + int gen; + + sc = vsc; + txq = &sc->vmx_txq[pi->ipi_qsidx]; + txr = &txq->vxtxq_cmd_ring; + segs = pi->ipi_segs; + nsegs = pi->ipi_nsegs; + pidx = pi->ipi_pidx; + + KASSERT(nsegs <= VMXNET3_TX_MAXSEGS, + ("%s: packet with too many segments %d", __func__, nsegs)); + + sop = &txr->vxtxr_txd[pidx]; + gen = txr->vxtxr_gen ^ 1; /* Owned by cpu (yet) */ + + for (i = 0; i < nsegs; i++) { + txd = &txr->vxtxr_txd[pidx]; + + txd->addr = segs[i].ds_addr; + txd->len = segs[i].ds_len; + txd->gen = gen; + txd->dtype = 0; + txd->offload_mode = VMXNET3_OM_NONE; + txd->offload_pos = 0; + txd->hlen = 0; + txd->eop = 0; + txd->compreq = 0; + txd->vtag_mode = 0; + txd->vtag = 0; + + if (++pidx == txr->vxtxr_ndesc) { + pidx = 0; + txr->vxtxr_gen ^= 1; + } + gen = txr->vxtxr_gen; + } + txd->eop = 1; + txd->compreq = !!(pi->ipi_flags & IPI_TX_INTR); + pi->ipi_new_pidx = pidx; + + /* + * VLAN + */ + if (pi->ipi_mflags & M_VLANTAG) { + sop->vtag_mode = 1; + sop->vtag = pi->ipi_vtag; } - VMXNET3_CORE_UNLOCK(sc); + /* + * TSO and checksum offloads + */ + hdrlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen; + if (pi->ipi_csum_flags & CSUM_TSO) { + sop->offload_mode = VMXNET3_OM_TSO; + sop->hlen = hdrlen; + sop->offload_pos = pi->ipi_tso_segsz; + } else if 
(pi->ipi_csum_flags & (VMXNET3_CSUM_OFFLOAD | + VMXNET3_CSUM_OFFLOAD_IPV6)) { + sop->offload_mode = VMXNET3_OM_CSUM; + sop->hlen = hdrlen; + sop->offload_pos = hdrlen + + ((pi->ipi_ipproto == IPPROTO_TCP) ? + offsetof(struct tcphdr, th_sum) : + offsetof(struct udphdr, uh_sum)); + } + + /* Finally, change the ownership. */ + vmxnet3_barrier(sc, VMXNET3_BARRIER_WR); + sop->gen ^= 1; + + return (0); } static void -vmxnet3_txq_eof(struct vmxnet3_txqueue *txq) +vmxnet3_isc_txd_flush(void *vsc, uint16_t txqid, qidx_t pidx) { struct vmxnet3_softc *sc; - struct ifnet *ifp; - struct vmxnet3_txring *txr; + struct vmxnet3_txqueue *txq; + + sc = vsc; + txq = &sc->vmx_txq[txqid]; + + /* + * pidx is what we last set ipi_new_pidx to in + * vmxnet3_isc_txd_encap() + */ + + /* + * Avoid expensive register updates if the flush request is + * redundant. + */ + if (txq->vxtxq_last_flush == pidx) + return; + txq->vxtxq_last_flush = pidx; + vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id), pidx); +} + +static int +vmxnet3_isc_txd_credits_update(void *vsc, uint16_t txqid, bool clear) +{ + struct vmxnet3_softc *sc; + struct vmxnet3_txqueue *txq; struct vmxnet3_comp_ring *txc; struct vmxnet3_txcompdesc *txcd; - struct vmxnet3_txbuf *txb; - struct mbuf *m; - u_int sop; - - sc = txq->vxtxq_sc; - ifp = sc->vmx_ifp; - txr = &txq->vxtxq_cmd_ring; + struct vmxnet3_txring *txr; + int processed; + + sc = vsc; + txq = &sc->vmx_txq[txqid]; txc = &txq->vxtxq_comp_ring; + txr = &txq->vxtxq_cmd_ring; - VMXNET3_TXQ_LOCK_ASSERT(txq); - + /* + * If clear is true, we need to report the number of TX command ring + * descriptors that have been processed by the device. If clear is + * false, we just need to report whether or not at least one TX + * command ring descriptor has been processed by the device. 
+ */ + processed = 0; for (;;) { txcd = &txc->vxcr_u.txcd[txc->vxcr_next]; if (txcd->gen != txc->vxcr_gen) break; + else if (!clear) + return (1); vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); if (++txc->vxcr_next == txc->vxcr_ndesc) { @@ -1859,585 +1397,343 @@ vmxnet3_txq_eof(struct vmxnet3_txqueue *txq) txc->vxcr_gen ^= 1; } - sop = txr->vxtxr_next; - txb = &txr->vxtxr_txbuf[sop]; - - if ((m = txb->vtxb_m) != NULL) { - bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap); - - txq->vxtxq_stats.vmtxs_opackets++; - txq->vxtxq_stats.vmtxs_obytes += m->m_pkthdr.len; - if (m->m_flags & M_MCAST) - txq->vxtxq_stats.vmtxs_omcasts++; - - m_freem(m); - txb->vtxb_m = NULL; - } - + if (txcd->eop_idx < txr->vxtxr_next) + processed += txr->vxtxr_ndesc - + (txr->vxtxr_next - txcd->eop_idx) + 1; + else + processed += txcd->eop_idx - txr->vxtxr_next + 1; txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc; } - if (txr->vxtxr_head == txr->vxtxr_next) - txq->vxtxq_watchdog = 0; + return (processed); } static int -vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr) +vmxnet3_isc_rxd_available(void *vsc, uint16_t rxqid, qidx_t idx, qidx_t budget) { - struct ifnet *ifp; - struct mbuf *m; + struct vmxnet3_softc *sc; + struct vmxnet3_rxqueue *rxq; + struct vmxnet3_comp_ring *rxc; + struct vmxnet3_rxcompdesc *rxcd; + int avail; + int completed_gen; +#ifdef INVARIANTS + int expect_sop = 1; +#endif + sc = vsc; + rxq = &sc->vmx_rxq[rxqid]; + rxc = &rxq->vxrxq_comp_ring; + + avail = 0; + completed_gen = rxc->vxcr_gen; + for (;;) { + rxcd = &rxc->vxcr_u.rxcd[idx]; + if (rxcd->gen != completed_gen) + break; + vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); + +#ifdef INVARIANTS + if (expect_sop) + KASSERT(rxcd->sop, ("%s: expected sop", __func__)); + else + KASSERT(!rxcd->sop, ("%s: unexpected sop", __func__)); + expect_sop = rxcd->eop; +#endif + if (rxcd->eop && (rxcd->len != 0)) + avail++; + if (avail > budget) + break; + if (++idx == rxc->vxcr_ndesc) { + idx = 0; + completed_gen ^= 1; + } + } + + return (avail); +} + +static int +vmxnet3_isc_rxd_pkt_get(void *vsc, if_rxd_info_t ri) +{ + struct vmxnet3_softc *sc; + if_softc_ctx_t scctx; + struct vmxnet3_rxqueue *rxq; + struct vmxnet3_comp_ring *rxc; + struct vmxnet3_rxcompdesc *rxcd; + struct vmxnet3_rxring *rxr; struct vmxnet3_rxdesc *rxd; - struct vmxnet3_rxbuf *rxb; - bus_dma_tag_t tag; - bus_dmamap_t dmap; - bus_dma_segment_t segs[1]; - int idx, clsize, btype, flags, nsegs, error; + if_rxd_frag_t frag; + int cqidx; + uint16_t total_len; + uint8_t nfrags; + uint8_t flid; - ifp = sc->vmx_ifp; - tag = rxr->vxrxr_rxtag; - dmap = rxr->vxrxr_spare_dmap; - idx = rxr->vxrxr_fill; - rxd = &rxr->vxrxr_rxd[idx]; - rxb = &rxr->vxrxr_rxbuf[idx]; + sc = vsc; + scctx = sc->vmx_scctx; + rxq = &sc->vmx_rxq[ri->iri_qsidx]; + rxc = &rxq->vxrxq_comp_ring; -#ifdef VMXNET3_FAILPOINTS - KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS); - if (rxr->vxrxr_rid != 0) - KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS); -#endif + /* + * Get a single packet starting at the given index in the completion + * queue. 
That we have been called indicates that + * vmxnet3_isc_rxd_available() has already verified that either + * there is a complete packet available starting at the given index, + * or there are one or more zero length packets starting at the + * given index followed by a complete packet, so no verification of + * ownership of the descriptors (and no associated read barrier) is + * required here. + */ + cqidx = ri->iri_cidx; + rxcd = &rxc->vxcr_u.rxcd[cqidx]; + while (rxcd->len == 0) { + KASSERT(rxcd->sop && rxcd->eop, + ("%s: zero-length packet without both sop and eop set", + __func__)); + if (++cqidx == rxc->vxcr_ndesc) { + cqidx = 0; + rxc->vxcr_gen ^= 1; + } + rxcd = &rxc->vxcr_u.rxcd[cqidx]; + } + KASSERT(rxcd->sop, ("%s: expected sop", __func__)); - if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) { - flags = M_PKTHDR; - clsize = MCLBYTES; - btype = VMXNET3_BTYPE_HEAD; - } else { -#if __FreeBSD_version < 902001 - /* - * These mbufs will never be used for the start of a frame. - * Roughly prior to branching releng/9.2, the load_mbuf_sg() - * required the mbuf to always be a packet header. Avoid - * unnecessary mbuf initialization in newer versions where - * that is not the case. - */ - flags = M_PKTHDR; -#else - flags = 0; -#endif - clsize = MJUMPAGESIZE; - btype = VMXNET3_BTYPE_BODY; + /* + * RSS and flow ID + */ + ri->iri_flowid = rxcd->rss_hash; + switch (rxcd->rss_type) { + case VMXNET3_RCD_RSS_TYPE_NONE: + ri->iri_flowid = ri->iri_qsidx; + ri->iri_rsstype = M_HASHTYPE_NONE; + break; + case VMXNET3_RCD_RSS_TYPE_IPV4: + ri->iri_rsstype = M_HASHTYPE_RSS_IPV4; + break; + case VMXNET3_RCD_RSS_TYPE_TCPIPV4: + ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4; + break; + case VMXNET3_RCD_RSS_TYPE_IPV6: + ri->iri_rsstype = M_HASHTYPE_RSS_IPV6; + break; + case VMXNET3_RCD_RSS_TYPE_TCPIPV6: + ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6; + break; + default: + ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH; + break; } - m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize); - if (m == NULL) { - sc->vmx_stats.vmst_mgetcl_failed++; - return (ENOBUFS); + /* VLAN */ + if (rxcd->vlan) { + ri->iri_flags |= M_VLANTAG; + ri->iri_vtag = rxcd->vtag; } - if (btype == VMXNET3_BTYPE_HEAD) { - m->m_len = m->m_pkthdr.len = clsize; - m_adj(m, ETHER_ALIGN); - } else - m->m_len = clsize; + /* Checksum offload */ + if (!rxcd->no_csum) { + uint32_t csum_flags = 0; - error = bus_dmamap_load_mbuf_sg(tag, dmap, m, &segs[0], &nsegs, - BUS_DMA_NOWAIT); - if (error) { - m_freem(m); - sc->vmx_stats.vmst_mbuf_load_failed++; - return (error); - } - KASSERT(nsegs == 1, - ("%s: mbuf %p with too many segments %d", __func__, m, nsegs)); -#if __FreeBSD_version < 902001 - if (btype == VMXNET3_BTYPE_BODY) - m->m_flags &= ~M_PKTHDR; -#endif - - if (rxb->vrxb_m != NULL) { - bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(tag, rxb->vrxb_dmamap); + if (rxcd->ipv4) { + csum_flags |= CSUM_IP_CHECKED; + if (rxcd->ipcsum_ok) + csum_flags |= CSUM_IP_VALID; + } + if (!rxcd->fragment && (rxcd->tcp || rxcd->udp)) { + csum_flags |= CSUM_L4_CALC; + if (rxcd->csum_ok) { + csum_flags |= CSUM_L4_VALID; + ri->iri_csum_data = 0xffff; + } + } + ri->iri_csum_flags = csum_flags; } - rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap; - rxb->vrxb_dmamap = dmap; - rxb->vrxb_m = m; + /* + * The queue numbering scheme used for rxcd->qid is as follows: + * - All of the command ring 0s are numbered [0, nrxqsets - 1] + * - All of the command ring 1s are numbered [nrxqsets, 2*nrxqsets - 1] + * + * Thus, rxcd->qid less than nrxqsets indicates 
command ring (and + * flid) 0, and rxcd->qid greater than or equal to nrxqsets + * indicates command ring (and flid) 1. + */ + nfrags = 0; + total_len = 0; + do { + rxcd = &rxc->vxcr_u.rxcd[cqidx]; + KASSERT(rxcd->gen == rxc->vxcr_gen, + ("%s: generation mismatch", __func__)); + flid = (rxcd->qid >= scctx->isc_nrxqsets) ? 1 : 0; + rxr = &rxq->vxrxq_cmd_ring[flid]; + rxd = &rxr->vxrxr_rxd[rxcd->rxd_idx]; - rxd->addr = segs[0].ds_addr; - rxd->len = segs[0].ds_len; - rxd->btype = btype; - rxd->gen = rxr->vxrxr_gen; + frag = &ri->iri_frags[nfrags]; + frag->irf_flid = flid; + frag->irf_idx = rxcd->rxd_idx; + frag->irf_len = rxcd->len; + total_len += rxcd->len; + nfrags++; + if (++cqidx == rxc->vxcr_ndesc) { + cqidx = 0; + rxc->vxcr_gen ^= 1; + } + } while (!rxcd->eop); + + ri->iri_cidx = cqidx; + ri->iri_nfrags = nfrags; + ri->iri_len = total_len; - vmxnet3_rxr_increment_fill(rxr); return (0); } static void -vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq, - struct vmxnet3_rxring *rxr, int idx) -{ - struct vmxnet3_rxdesc *rxd; - - rxd = &rxr->vxrxr_rxd[idx]; - rxd->gen = rxr->vxrxr_gen; - vmxnet3_rxr_increment_fill(rxr); -} - -static void -vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq) +vmxnet3_isc_rxd_refill(void *vsc, if_rxd_update_t iru) { struct vmxnet3_softc *sc; + struct vmxnet3_rxqueue *rxq; struct vmxnet3_rxring *rxr; - struct vmxnet3_comp_ring *rxc; - struct vmxnet3_rxcompdesc *rxcd; - int idx, eof; - - sc = rxq->vxrxq_sc; - rxc = &rxq->vxrxq_comp_ring; - - do { - rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next]; - if (rxcd->gen != rxc->vxcr_gen) - break; /* Not expected. */ - vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); - - if (++rxc->vxcr_next == rxc->vxcr_ndesc) { - rxc->vxcr_next = 0; - rxc->vxcr_gen ^= 1; - } - - idx = rxcd->rxd_idx; - eof = rxcd->eop; - if (rxcd->qid < sc->vmx_nrxqueues) - rxr = &rxq->vxrxq_cmd_ring[0]; - else - rxr = &rxq->vxrxq_cmd_ring[1]; - vmxnet3_rxq_eof_discard(rxq, rxr, idx); - } while (!eof); -} - -static void -vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m) -{ - - if (rxcd->ipv4) { - m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; - if (rxcd->ipcsum_ok) - m->m_pkthdr.csum_flags |= CSUM_IP_VALID; - } - - if (!rxcd->fragment) { - if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) { - m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | - CSUM_PSEUDO_HDR; - m->m_pkthdr.csum_data = 0xFFFF; - } - } -} - -static void -vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq, - struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m) -{ - struct vmxnet3_softc *sc; - struct ifnet *ifp; - - sc = rxq->vxrxq_sc; - ifp = sc->vmx_ifp; - - if (rxcd->error) { - rxq->vxrxq_stats.vmrxs_ierrors++; - m_freem(m); - return; - } - -#ifdef notyet - switch (rxcd->rss_type) { - case VMXNET3_RCD_RSS_TYPE_IPV4: - m->m_pkthdr.flowid = rxcd->rss_hash; - M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV4); - break; - case VMXNET3_RCD_RSS_TYPE_TCPIPV4: - m->m_pkthdr.flowid = rxcd->rss_hash; - M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4); - break; - case VMXNET3_RCD_RSS_TYPE_IPV6: - m->m_pkthdr.flowid = rxcd->rss_hash; - M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV6); - break; - case VMXNET3_RCD_RSS_TYPE_TCPIPV6: - m->m_pkthdr.flowid = rxcd->rss_hash; - M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV6); - break; - default: /* VMXNET3_RCD_RSS_TYPE_NONE */ - m->m_pkthdr.flowid = rxq->vxrxq_id; - M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); - break; - } -#else - m->m_pkthdr.flowid = rxq->vxrxq_id; - M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); -#endif - - if (!rxcd->no_csum) - vmxnet3_rx_csum(rxcd, m); - if (rxcd->vlan) { - m->m_flags |= M_VLANTAG; - 
m->m_pkthdr.ether_vtag = rxcd->vtag; - } - - rxq->vxrxq_stats.vmrxs_ipackets++; - rxq->vxrxq_stats.vmrxs_ibytes += m->m_pkthdr.len; - - VMXNET3_RXQ_UNLOCK(rxq); - (*ifp->if_input)(ifp, m); - VMXNET3_RXQ_LOCK(rxq); -} - -static void -vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq) -{ - struct vmxnet3_softc *sc; - struct ifnet *ifp; - struct vmxnet3_rxring *rxr; - struct vmxnet3_comp_ring *rxc; struct vmxnet3_rxdesc *rxd; - struct vmxnet3_rxcompdesc *rxcd; - struct mbuf *m, *m_head, *m_tail; - int idx, length; + uint64_t *paddrs; + int count; + int len; + int pidx; + int i; + uint8_t flid; + uint8_t btype; - sc = rxq->vxrxq_sc; - ifp = sc->vmx_ifp; - rxc = &rxq->vxrxq_comp_ring; + count = iru->iru_count; + len = iru->iru_buf_size; + pidx = iru->iru_pidx; + flid = iru->iru_flidx; + paddrs = iru->iru_paddrs; - VMXNET3_RXQ_LOCK_ASSERT(rxq); + sc = vsc; + rxq = &sc->vmx_rxq[iru->iru_qsidx]; + rxr = &rxq->vxrxq_cmd_ring[flid]; + rxd = rxr->vxrxr_rxd; - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - return; + /* + * Command ring 0 is filled with BTYPE_HEAD descriptors, and + * command ring 1 is filled with BTYPE_BODY descriptors. + */ + btype = (flid == 0) ? VMXNET3_BTYPE_HEAD : VMXNET3_BTYPE_BODY; + for (i = 0; i < count; i++) { + rxd[pidx].addr = paddrs[i]; + rxd[pidx].len = len; + rxd[pidx].btype = btype; + rxd[pidx].gen = rxr->vxrxr_gen; - m_head = rxq->vxrxq_mhead; - rxq->vxrxq_mhead = NULL; - m_tail = rxq->vxrxq_mtail; - rxq->vxrxq_mtail = NULL; - MPASS(m_head == NULL || m_tail != NULL); - - for (;;) { - rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next]; - if (rxcd->gen != rxc->vxcr_gen) { - rxq->vxrxq_mhead = m_head; - rxq->vxrxq_mtail = m_tail; - break; - } - vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); - - if (++rxc->vxcr_next == rxc->vxcr_ndesc) { - rxc->vxcr_next = 0; - rxc->vxcr_gen ^= 1; - } - - idx = rxcd->rxd_idx; - length = rxcd->len; - if (rxcd->qid < sc->vmx_nrxqueues) - rxr = &rxq->vxrxq_cmd_ring[0]; - else - rxr = &rxq->vxrxq_cmd_ring[1]; - rxd = &rxr->vxrxr_rxd[idx]; - - m = rxr->vxrxr_rxbuf[idx].vrxb_m; - KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf", - __func__, rxcd->qid, idx)); - - /* - * The host may skip descriptors. We detect this when this - * descriptor does not match the previous fill index. Catch - * up with the host now. - */ - if (__predict_false(rxr->vxrxr_fill != idx)) { - while (rxr->vxrxr_fill != idx) { - rxr->vxrxr_rxd[rxr->vxrxr_fill].gen = - rxr->vxrxr_gen; - vmxnet3_rxr_increment_fill(rxr); - } - } - - if (rxcd->sop) { - KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD, - ("%s: start of frame w/o head buffer", __func__)); - KASSERT(rxr == &rxq->vxrxq_cmd_ring[0], - ("%s: start of frame not in ring 0", __func__)); - KASSERT((idx % sc->vmx_rx_max_chain) == 0, - ("%s: start of frame at unexcepted index %d (%d)", - __func__, idx, sc->vmx_rx_max_chain)); - KASSERT(m_head == NULL, - ("%s: duplicate start of frame?", __func__)); - - if (length == 0) { - /* Just ignore this descriptor. 
*/ - vmxnet3_rxq_eof_discard(rxq, rxr, idx); - goto nextp; - } - - if (vmxnet3_newbuf(sc, rxr) != 0) { - rxq->vxrxq_stats.vmrxs_iqdrops++; - vmxnet3_rxq_eof_discard(rxq, rxr, idx); - if (!rxcd->eop) - vmxnet3_rxq_discard_chain(rxq); - goto nextp; - } - - m->m_pkthdr.rcvif = ifp; - m->m_pkthdr.len = m->m_len = length; - m->m_pkthdr.csum_flags = 0; - m_head = m_tail = m; - - } else { - KASSERT(rxd->btype == VMXNET3_BTYPE_BODY, - ("%s: non start of frame w/o body buffer", __func__)); - - if (m_head == NULL && m_tail == NULL) { - /* - * This is a continuation of a packet that we - * started to drop, but could not drop entirely - * because this segment was still owned by the - * host. So, drop the remainder now. - */ - vmxnet3_rxq_eof_discard(rxq, rxr, idx); - if (!rxcd->eop) - vmxnet3_rxq_discard_chain(rxq); - goto nextp; - } - - KASSERT(m_head != NULL, - ("%s: frame not started?", __func__)); - - if (vmxnet3_newbuf(sc, rxr) != 0) { - rxq->vxrxq_stats.vmrxs_iqdrops++; - vmxnet3_rxq_eof_discard(rxq, rxr, idx); - if (!rxcd->eop) - vmxnet3_rxq_discard_chain(rxq); - m_freem(m_head); - m_head = m_tail = NULL; - goto nextp; - } - - m->m_len = length; - m_head->m_pkthdr.len += length; - m_tail->m_next = m; - m_tail = m; - } - - if (rxcd->eop) { - vmxnet3_rxq_input(rxq, rxcd, m_head); - m_head = m_tail = NULL; - - /* Must recheck after dropping the Rx lock. */ - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - break; - } - -nextp: - if (__predict_false(rxq->vxrxq_rs->update_rxhead)) { - int qid = rxcd->qid; - bus_size_t r; - - idx = (idx + 1) % rxr->vxrxr_ndesc; - if (qid >= sc->vmx_nrxqueues) { - qid -= sc->vmx_nrxqueues; - r = VMXNET3_BAR0_RXH2(qid); - } else - r = VMXNET3_BAR0_RXH1(qid); - vmxnet3_write_bar0(sc, r, idx); + if (++pidx == rxr->vxrxr_ndesc) { + pidx = 0; + rxr->vxrxr_gen ^= 1; } } } static void +vmxnet3_isc_rxd_flush(void *vsc, uint16_t rxqid, uint8_t flid, qidx_t pidx) +{ + struct vmxnet3_softc *sc; + struct vmxnet3_rxqueue *rxq; + struct vmxnet3_rxring *rxr; + bus_size_t r; + + sc = vsc; + rxq = &sc->vmx_rxq[rxqid]; + rxr = &rxq->vxrxq_cmd_ring[flid]; + + if (flid == 0) + r = VMXNET3_BAR0_RXH1(rxqid); + else + r = VMXNET3_BAR0_RXH2(rxqid); + + /* + * pidx is the index of the last descriptor with a buffer the device + * can use, and the device needs to be told which index is one past + * that. + */ + if (++pidx == rxr->vxrxr_ndesc) + pidx = 0; + vmxnet3_write_bar0(sc, r, pidx); +} + +static int vmxnet3_legacy_intr(void *xsc) { struct vmxnet3_softc *sc; - struct vmxnet3_rxqueue *rxq; - struct vmxnet3_txqueue *txq; - + if_softc_ctx_t scctx; + if_ctx_t ctx; + sc = xsc; - rxq = &sc->vmx_rxq[0]; - txq = &sc->vmx_txq[0]; + scctx = sc->vmx_scctx; + ctx = sc->vmx_ctx; - if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) { + /* + * When there is only a single interrupt configured, this routine + * runs in fast interrupt context, following which the rxq 0 task + * will be enqueued. 
+ */ + if (scctx->isc_intr == IFLIB_INTR_LEGACY) { if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0) - return; + return (FILTER_HANDLED); } if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) - vmxnet3_disable_all_intrs(sc); + vmxnet3_intr_disable_all(ctx); if (sc->vmx_ds->event != 0) - vmxnet3_evintr(sc); + iflib_admin_intr_deferred(ctx); - VMXNET3_RXQ_LOCK(rxq); - vmxnet3_rxq_eof(rxq); - VMXNET3_RXQ_UNLOCK(rxq); - - VMXNET3_TXQ_LOCK(txq); - vmxnet3_txq_eof(txq); - vmxnet3_txq_start(txq); - VMXNET3_TXQ_UNLOCK(txq); - - vmxnet3_enable_all_intrs(sc); + /* + * XXX - When there is both rxq and event activity, do we care + * whether the rxq 0 task or the admin task re-enables the interrupt + * first? + */ + return (FILTER_SCHEDULE_THREAD); } -static void -vmxnet3_txq_intr(void *xtxq) -{ - struct vmxnet3_softc *sc; - struct vmxnet3_txqueue *txq; - - txq = xtxq; - sc = txq->vxtxq_sc; - - if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) - vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx); - - VMXNET3_TXQ_LOCK(txq); - vmxnet3_txq_eof(txq); - vmxnet3_txq_start(txq); - VMXNET3_TXQ_UNLOCK(txq); - - vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx); -} - -static void -vmxnet3_rxq_intr(void *xrxq) +static int +vmxnet3_rxq_intr(void *vrxq) { struct vmxnet3_softc *sc; struct vmxnet3_rxqueue *rxq; - rxq = xrxq; + rxq = vrxq; sc = rxq->vxrxq_sc; if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx); - VMXNET3_RXQ_LOCK(rxq); - vmxnet3_rxq_eof(rxq); - VMXNET3_RXQ_UNLOCK(rxq); - - vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx); + return (FILTER_SCHEDULE_THREAD); } -static void -vmxnet3_event_intr(void *xsc) +static int +vmxnet3_event_intr(void *vsc) { struct vmxnet3_softc *sc; - sc = xsc; + sc = vsc; if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) - vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx); - if (sc->vmx_ds->event != 0) - vmxnet3_evintr(sc); - - vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx); + /* + * The work will be done via vmxnet3_update_admin_status(), and the + * interrupt will be re-enabled in vmxnet3_link_intr_enable(). 
+ */ + return (FILTER_SCHEDULE_THREAD); } static void -vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq) +vmxnet3_stop(if_ctx_t ctx) { - struct vmxnet3_txring *txr; - struct vmxnet3_txbuf *txb; - int i; + struct vmxnet3_softc *sc; - txr = &txq->vxtxq_cmd_ring; + sc = iflib_get_softc(ctx); - for (i = 0; i < txr->vxtxr_ndesc; i++) { - txb = &txr->vxtxr_txbuf[i]; - - if (txb->vtxb_m == NULL) - continue; - - bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap); - m_freem(txb->vtxb_m); - txb->vtxb_m = NULL; - } -} - -static void -vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq) -{ - struct vmxnet3_rxring *rxr; - struct vmxnet3_rxbuf *rxb; - int i, j; - - if (rxq->vxrxq_mhead != NULL) { - m_freem(rxq->vxrxq_mhead); - rxq->vxrxq_mhead = NULL; - rxq->vxrxq_mtail = NULL; - } - - for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { - rxr = &rxq->vxrxq_cmd_ring[i]; - - for (j = 0; j < rxr->vxrxr_ndesc; j++) { - rxb = &rxr->vxrxr_rxbuf[j]; - - if (rxb->vrxb_m == NULL) - continue; - - bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap); - m_freem(rxb->vrxb_m); - rxb->vrxb_m = NULL; - } - } -} - -static void -vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc) -{ - struct vmxnet3_rxqueue *rxq; - struct vmxnet3_txqueue *txq; - int i; - - for (i = 0; i < sc->vmx_nrxqueues; i++) { - rxq = &sc->vmx_rxq[i]; - VMXNET3_RXQ_LOCK(rxq); - VMXNET3_RXQ_UNLOCK(rxq); - } - - for (i = 0; i < sc->vmx_ntxqueues; i++) { - txq = &sc->vmx_txq[i]; - VMXNET3_TXQ_LOCK(txq); - VMXNET3_TXQ_UNLOCK(txq); - } -} - -static void -vmxnet3_stop(struct vmxnet3_softc *sc) -{ - struct ifnet *ifp; - int q; - - ifp = sc->vmx_ifp; - VMXNET3_CORE_LOCK_ASSERT(sc); - - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sc->vmx_link_active = 0; - callout_stop(&sc->vmx_tick); - - /* Disable interrupts. */ - vmxnet3_disable_all_intrs(sc); vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE); - - vmxnet3_stop_rendezvous(sc); - - for (q = 0; q < sc->vmx_ntxqueues; q++) - vmxnet3_txstop(sc, &sc->vmx_txq[q]); - for (q = 0; q < sc->vmx_nrxqueues; q++) - vmxnet3_rxstop(sc, &sc->vmx_rxq[q]); - vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET); } @@ -2447,74 +1743,47 @@ vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq) struct vmxnet3_txring *txr; struct vmxnet3_comp_ring *txc; + txq->vxtxq_last_flush = -1; + txr = &txq->vxtxq_cmd_ring; - txr->vxtxr_head = 0; txr->vxtxr_next = 0; txr->vxtxr_gen = VMXNET3_INIT_GEN; - bzero(txr->vxtxr_txd, - txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc)); + /* + * iflib has zeroed out the descriptor array during the prior attach + * or stop + */ txc = &txq->vxtxq_comp_ring; txc->vxcr_next = 0; txc->vxcr_gen = VMXNET3_INIT_GEN; - bzero(txc->vxcr_u.txcd, - txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc)); + /* + * iflib has zeroed out the descriptor array during the prior attach + * or stop + */ } -static int +static void vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq) { - struct ifnet *ifp; struct vmxnet3_rxring *rxr; struct vmxnet3_comp_ring *rxc; - int i, populate, idx, frame_size, error; - - ifp = sc->vmx_ifp; - frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) + - ifp->if_mtu; + int i; /* - * If the MTU causes us to exceed what a regular sized cluster can - * handle, we allocate a second MJUMPAGESIZE cluster after it in - * ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters. 
- * - * Keep rx_max_chain a divisor of the maximum Rx ring size to make - * our life easier. We do not support changing the ring size after - * the attach. + * The descriptors will be populated with buffers during a + * subsequent invocation of vmxnet3_isc_rxd_refill() */ - if (frame_size <= MCLBYTES) - sc->vmx_rx_max_chain = 1; - else - sc->vmx_rx_max_chain = 2; - - /* - * Only populate ring 1 if the configuration will take advantage - * of it. That is either when LRO is enabled or the frame size - * exceeds what ring 0 can contain. - */ - if ((ifp->if_capenable & IFCAP_LRO) == 0 && - frame_size <= MCLBYTES + MJUMPAGESIZE) - populate = 1; - else - populate = VMXNET3_RXRINGS_PERQ; - - for (i = 0; i < populate; i++) { + for (i = 0; i < sc->vmx_sctx->isc_nrxqs - 1; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; - rxr->vxrxr_fill = 0; rxr->vxrxr_gen = VMXNET3_INIT_GEN; - bzero(rxr->vxrxr_rxd, - rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); - - for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) { - error = vmxnet3_newbuf(sc, rxr); - if (error) - return (error); - } + /* + * iflib has zeroed out the descriptor array during the + * prior attach or stop + */ } for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; - rxr->vxrxr_fill = 0; rxr->vxrxr_gen = 0; bzero(rxr->vxrxr_rxd, rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); @@ -2523,46 +1792,42 @@ vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq) rxc = &rxq->vxrxq_comp_ring; rxc->vxcr_next = 0; rxc->vxcr_gen = VMXNET3_INIT_GEN; - bzero(rxc->vxcr_u.rxcd, - rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc)); - - return (0); + /* + * iflib has zeroed out the descriptor array during the prior attach + * or stop + */ } -static int +static void vmxnet3_reinit_queues(struct vmxnet3_softc *sc) { - device_t dev; - int q, error; + if_softc_ctx_t scctx; + int q; - dev = sc->vmx_dev; + scctx = sc->vmx_scctx; - for (q = 0; q < sc->vmx_ntxqueues; q++) + for (q = 0; q < scctx->isc_ntxqsets; q++) vmxnet3_txinit(sc, &sc->vmx_txq[q]); - for (q = 0; q < sc->vmx_nrxqueues; q++) { - error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]); - if (error) { - device_printf(dev, "cannot populate Rx queue %d\n", q); - return (error); - } - } - - return (0); + for (q = 0; q < scctx->isc_nrxqsets; q++) + vmxnet3_rxinit(sc, &sc->vmx_rxq[q]); } static int vmxnet3_enable_device(struct vmxnet3_softc *sc) { + if_softc_ctx_t scctx; int q; + scctx = sc->vmx_scctx; + if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) { device_printf(sc->vmx_dev, "device enable command failed!\n"); return (1); } /* Reset the Rx queue heads. */ - for (q = 0; q < sc->vmx_nrxqueues; q++) { + for (q = 0; q < scctx->isc_nrxqsets; q++) { vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0); vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0); } @@ -2577,7 +1842,7 @@ vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc) ifp = sc->vmx_ifp; - vmxnet3_set_rxfilter(sc); + vmxnet3_set_rxfilter(sc, if_getflags(ifp)); if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter, @@ -2588,573 +1853,149 @@ vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc) vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER); } -static int -vmxnet3_reinit(struct vmxnet3_softc *sc) +static void +vmxnet3_init(if_ctx_t ctx) { + struct vmxnet3_softc *sc; + if_softc_ctx_t scctx; + + sc = iflib_get_softc(ctx); + scctx = sc->vmx_scctx; + + scctx->isc_max_frame_size = if_getmtu(iflib_get_ifp(ctx)) + + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; + + /* Use the current MAC address. 
*/ + bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN); + vmxnet3_set_lladdr(sc); - vmxnet3_reinit_interface(sc); vmxnet3_reinit_shared_data(sc); + vmxnet3_reinit_queues(sc); - if (vmxnet3_reinit_queues(sc) != 0) - return (ENXIO); - - if (vmxnet3_enable_device(sc) != 0) - return (ENXIO); + vmxnet3_enable_device(sc); vmxnet3_reinit_rxfilters(sc); - - return (0); -} - -static void -vmxnet3_init_locked(struct vmxnet3_softc *sc) -{ - struct ifnet *ifp; - - ifp = sc->vmx_ifp; - - if (ifp->if_drv_flags & IFF_DRV_RUNNING) - return; - - vmxnet3_stop(sc); - - if (vmxnet3_reinit(sc) != 0) { - vmxnet3_stop(sc); - return; - } - - ifp->if_drv_flags |= IFF_DRV_RUNNING; vmxnet3_link_status(sc); - - vmxnet3_enable_all_intrs(sc); - callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc); } static void -vmxnet3_init(void *xsc) +vmxnet3_multi_set(if_ctx_t ctx) { - struct vmxnet3_softc *sc; - sc = xsc; - - VMXNET3_CORE_LOCK(sc); - vmxnet3_init_locked(sc); - VMXNET3_CORE_UNLOCK(sc); + vmxnet3_set_rxfilter(iflib_get_softc(ctx), + if_getflags(iflib_get_ifp(ctx))); } -/* - * BMV: Much of this can go away once we finally have offsets in - * the mbuf packet header. Bug andre@. - */ static int -vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m, - int *etype, int *proto, int *start) +vmxnet3_mtu_set(if_ctx_t ctx, uint32_t mtu) { - struct ether_vlan_header *evh; - int offset; -#if defined(INET) - struct ip *ip = NULL; - struct ip iphdr; -#endif -#if defined(INET6) - struct ip6_hdr *ip6 = NULL; - struct ip6_hdr ip6hdr; -#endif - evh = mtod(m, struct ether_vlan_header *); - if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { - /* BMV: We should handle nested VLAN tags too. */ - *etype = ntohs(evh->evl_proto); - offset = sizeof(struct ether_vlan_header); - } else { - *etype = ntohs(evh->evl_encap_proto); - offset = sizeof(struct ether_header); - } - - switch (*etype) { -#if defined(INET) - case ETHERTYPE_IP: - if (__predict_false(m->m_len < offset + sizeof(struct ip))) { - m_copydata(m, offset, sizeof(struct ip), - (caddr_t) &iphdr); - ip = &iphdr; - } else - ip = mtodo(m, offset); - *proto = ip->ip_p; - *start = offset + (ip->ip_hl << 2); - break; -#endif -#if defined(INET6) - case ETHERTYPE_IPV6: - if (__predict_false(m->m_len < - offset + sizeof(struct ip6_hdr))) { - m_copydata(m, offset, sizeof(struct ip6_hdr), - (caddr_t) &ip6hdr); - ip6 = &ip6hdr; - } else - ip6 = mtodo(m, offset); - *proto = -1; - *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); - /* Assert the network stack sent us a valid packet. */ - KASSERT(*start > offset, - ("%s: mbuf %p start %d offset %d proto %d", __func__, m, - *start, offset, *proto)); - break; -#endif - default: + if (mtu > VMXNET3_TX_MAXSIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + + ETHER_CRC_LEN)) return (EINVAL); - } - - if (m->m_pkthdr.csum_flags & CSUM_TSO) { - struct tcphdr *tcp, tcphdr; - uint16_t sum; - - if (__predict_false(*proto != IPPROTO_TCP)) { - /* Likely failed to correctly parse the mbuf. 
*/ - return (EINVAL); - } - - txq->vxtxq_stats.vmtxs_tso++; - - switch (*etype) { -#if defined(INET) - case ETHERTYPE_IP: - sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, - htons(IPPROTO_TCP)); - break; -#endif -#if defined(INET6) - case ETHERTYPE_IPV6: - sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); - break; -#endif - default: - sum = 0; - break; - } - - if (m->m_len < *start + sizeof(struct tcphdr)) { - m_copyback(m, *start + offsetof(struct tcphdr, th_sum), - sizeof(uint16_t), (caddr_t) &sum); - m_copydata(m, *start, sizeof(struct tcphdr), - (caddr_t) &tcphdr); - tcp = &tcphdr; - } else { - tcp = mtodo(m, *start); - tcp->th_sum = sum; - } - - /* - * For TSO, the size of the protocol header is also - * included in the descriptor header size. - */ - *start += (tcp->th_off << 2); - } else - txq->vxtxq_stats.vmtxs_csum++; return (0); } -static int -vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0, - bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs) +static void +vmxnet3_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) { - struct vmxnet3_txring *txr; - struct mbuf *m; - bus_dma_tag_t tag; - int error; + struct vmxnet3_softc *sc; - txr = &txq->vxtxq_cmd_ring; - m = *m0; - tag = txr->vxtxr_txtag; + sc = iflib_get_softc(ctx); - error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0); - if (error == 0 || error != EFBIG) - return (error); + ifmr->ifm_status = IFM_AVALID; + ifmr->ifm_active = IFM_ETHER; - m = m_defrag(m, M_NOWAIT); - if (m != NULL) { - *m0 = m; - error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0); + if (vmxnet3_link_is_up(sc) != 0) { + ifmr->ifm_status |= IFM_ACTIVE; + ifmr->ifm_active |= IFM_AUTO; } else - error = ENOBUFS; - - if (error) { - m_freem(*m0); - *m0 = NULL; - txq->vxtxq_sc->vmx_stats.vmst_defrag_failed++; - } else - txq->vxtxq_sc->vmx_stats.vmst_defragged++; - - return (error); -} - -static void -vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap) -{ - struct vmxnet3_txring *txr; - - txr = &txq->vxtxq_cmd_ring; - bus_dmamap_unload(txr->vxtxr_txtag, dmap); + ifmr->ifm_active |= IFM_NONE; } static int -vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0) +vmxnet3_media_change(if_ctx_t ctx) { - struct vmxnet3_softc *sc; - struct vmxnet3_txring *txr; - struct vmxnet3_txdesc *txd, *sop; - struct mbuf *m; - bus_dmamap_t dmap; - bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS]; - int i, gen, nsegs, etype, proto, start, error; - - sc = txq->vxtxq_sc; - start = 0; - txd = NULL; - txr = &txq->vxtxq_cmd_ring; - dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap; - - error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs); - if (error) - return (error); - - m = *m0; - M_ASSERTPKTHDR(m); - KASSERT(nsegs <= VMXNET3_TX_MAXSEGS, - ("%s: mbuf %p with too many segments %d", __func__, m, nsegs)); - - if (VMXNET3_TXRING_AVAIL(txr) < nsegs) { - txq->vxtxq_stats.vmtxs_full++; - vmxnet3_txq_unload_mbuf(txq, dmap); - return (ENOSPC); - } else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) { - error = vmxnet3_txq_offload_ctx(txq, m, &etype, &proto, &start); - if (error) { - txq->vxtxq_stats.vmtxs_offload_failed++; - vmxnet3_txq_unload_mbuf(txq, dmap); - m_freem(m); - *m0 = NULL; - return (error); - } - } - - txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m; - sop = &txr->vxtxr_txd[txr->vxtxr_head]; - gen = txr->vxtxr_gen ^ 1; /* Owned by cpu (yet) */ - - for (i = 0; i < nsegs; i++) { - txd = &txr->vxtxr_txd[txr->vxtxr_head]; - - txd->addr = segs[i].ds_addr; - txd->len = segs[i].ds_len; - txd->gen = gen; 
- txd->dtype = 0; - txd->offload_mode = VMXNET3_OM_NONE; - txd->offload_pos = 0; - txd->hlen = 0; - txd->eop = 0; - txd->compreq = 0; - txd->vtag_mode = 0; - txd->vtag = 0; - - if (++txr->vxtxr_head == txr->vxtxr_ndesc) { - txr->vxtxr_head = 0; - txr->vxtxr_gen ^= 1; - } - gen = txr->vxtxr_gen; - } - txd->eop = 1; - txd->compreq = 1; - - if (m->m_flags & M_VLANTAG) { - sop->vtag_mode = 1; - sop->vtag = m->m_pkthdr.ether_vtag; - } - - if (m->m_pkthdr.csum_flags & CSUM_TSO) { - sop->offload_mode = VMXNET3_OM_TSO; - sop->hlen = start; - sop->offload_pos = m->m_pkthdr.tso_segsz; - } else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD | - VMXNET3_CSUM_OFFLOAD_IPV6)) { - sop->offload_mode = VMXNET3_OM_CSUM; - sop->hlen = start; - sop->offload_pos = start + m->m_pkthdr.csum_data; - } - - /* Finally, change the ownership. */ - vmxnet3_barrier(sc, VMXNET3_BARRIER_WR); - sop->gen ^= 1; - - txq->vxtxq_ts->npending += nsegs; - if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) { - txq->vxtxq_ts->npending = 0; - vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id), - txr->vxtxr_head); - } - - return (0); -} - -#ifdef VMXNET3_LEGACY_TX - -static void -vmxnet3_start_locked(struct ifnet *ifp) -{ - struct vmxnet3_softc *sc; - struct vmxnet3_txqueue *txq; - struct vmxnet3_txring *txr; - struct mbuf *m_head; - int tx, avail; - - sc = ifp->if_softc; - txq = &sc->vmx_txq[0]; - txr = &txq->vxtxq_cmd_ring; - tx = 0; - - VMXNET3_TXQ_LOCK_ASSERT(txq); - - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || - sc->vmx_link_active == 0) - return; - - while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { - if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2) - break; - - IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); - if (m_head == NULL) - break; - - /* Assume worse case if this mbuf is the head of a chain. */ - if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) { - IFQ_DRV_PREPEND(&ifp->if_snd, m_head); - break; - } - - if (vmxnet3_txq_encap(txq, &m_head) != 0) { - if (m_head != NULL) - IFQ_DRV_PREPEND(&ifp->if_snd, m_head); - break; - } - - tx++; - ETHER_BPF_MTAP(ifp, m_head); - } - - if (tx > 0) - txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT; -} - -static void -vmxnet3_start(struct ifnet *ifp) -{ - struct vmxnet3_softc *sc; - struct vmxnet3_txqueue *txq; - - sc = ifp->if_softc; - txq = &sc->vmx_txq[0]; - - VMXNET3_TXQ_LOCK(txq); - vmxnet3_start_locked(ifp); - VMXNET3_TXQ_UNLOCK(txq); -} - -#else /* !VMXNET3_LEGACY_TX */ - -static int -vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *txq, struct mbuf *m) -{ - struct vmxnet3_softc *sc; - struct vmxnet3_txring *txr; - struct buf_ring *br; - struct ifnet *ifp; - int tx, avail, error; - - sc = txq->vxtxq_sc; - br = txq->vxtxq_br; - ifp = sc->vmx_ifp; - txr = &txq->vxtxq_cmd_ring; - tx = 0; - error = 0; - - VMXNET3_TXQ_LOCK_ASSERT(txq); - - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || - sc->vmx_link_active == 0) { - if (m != NULL) - error = drbr_enqueue(ifp, br, m); - return (error); - } - - if (m != NULL) { - error = drbr_enqueue(ifp, br, m); - if (error) - return (error); - } - - while ((avail = VMXNET3_TXRING_AVAIL(txr)) >= 2) { - m = drbr_peek(ifp, br); - if (m == NULL) - break; - - /* Assume worse case if this mbuf is the head of a chain. 
*/ - if (m->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) { - drbr_putback(ifp, br, m); - break; - } - - if (vmxnet3_txq_encap(txq, &m) != 0) { - if (m != NULL) - drbr_putback(ifp, br, m); - else - drbr_advance(ifp, br); - break; - } - drbr_advance(ifp, br); - - tx++; - ETHER_BPF_MTAP(ifp, m); - } - - if (tx > 0) - txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT; + /* Ignore. */ return (0); } static int -vmxnet3_txq_mq_start(struct ifnet *ifp, struct mbuf *m) +vmxnet3_promisc_set(if_ctx_t ctx, int flags) { - struct vmxnet3_softc *sc; - struct vmxnet3_txqueue *txq; - int i, ntxq, error; - sc = ifp->if_softc; - ntxq = sc->vmx_ntxqueues; + vmxnet3_set_rxfilter(iflib_get_softc(ctx), flags); - /* check if flowid is set */ - if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) - i = m->m_pkthdr.flowid % ntxq; - else - i = curcpu % ntxq; + return (0); +} - txq = &sc->vmx_txq[i]; +static uint64_t +vmxnet3_get_counter(if_ctx_t ctx, ift_counter cnt) +{ + if_t ifp = iflib_get_ifp(ctx); - if (VMXNET3_TXQ_TRYLOCK(txq) != 0) { - error = vmxnet3_txq_mq_start_locked(txq, m); - VMXNET3_TXQ_UNLOCK(txq); - } else { - error = drbr_enqueue(ifp, txq->vxtxq_br, m); - taskqueue_enqueue(sc->vmx_tq, &txq->vxtxq_defrtask); - } + if (cnt < IFCOUNTERS) + return if_get_counter_default(ifp, cnt); - return (error); + return (0); } static void -vmxnet3_txq_tq_deferred(void *xtxq, int pending) +vmxnet3_update_admin_status(if_ctx_t ctx) { struct vmxnet3_softc *sc; - struct vmxnet3_txqueue *txq; - txq = xtxq; - sc = txq->vxtxq_sc; + sc = iflib_get_softc(ctx); + if (sc->vmx_ds->event != 0) + vmxnet3_evintr(sc); - VMXNET3_TXQ_LOCK(txq); - if (!drbr_empty(sc->vmx_ifp, txq->vxtxq_br)) - vmxnet3_txq_mq_start_locked(txq, NULL); - VMXNET3_TXQ_UNLOCK(txq); -} - -#endif /* VMXNET3_LEGACY_TX */ - -static void -vmxnet3_txq_start(struct vmxnet3_txqueue *txq) -{ - struct vmxnet3_softc *sc; - struct ifnet *ifp; - - sc = txq->vxtxq_sc; - ifp = sc->vmx_ifp; - -#ifdef VMXNET3_LEGACY_TX - if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) - vmxnet3_start_locked(ifp); -#else - if (!drbr_empty(ifp, txq->vxtxq_br)) - vmxnet3_txq_mq_start_locked(txq, NULL); -#endif + vmxnet3_refresh_host_stats(sc); } static void -vmxnet3_tx_start_all(struct vmxnet3_softc *sc) +vmxnet3_txq_timer(if_ctx_t ctx, uint16_t qid) { - struct vmxnet3_txqueue *txq; - int i; - - VMXNET3_CORE_LOCK_ASSERT(sc); - - for (i = 0; i < sc->vmx_ntxqueues; i++) { - txq = &sc->vmx_txq[i]; - - VMXNET3_TXQ_LOCK(txq); - vmxnet3_txq_start(txq); - VMXNET3_TXQ_UNLOCK(txq); - } + /* Host stats refresh is global, so just trigger it on txq 0 */ + if (qid == 0) + vmxnet3_refresh_host_stats(iflib_get_softc(ctx)); } static void vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag) { - struct ifnet *ifp; int idx, bit; - ifp = sc->vmx_ifp; - idx = (tag >> 5) & 0x7F; - bit = tag & 0x1F; - if (tag == 0 || tag > 4095) return; - VMXNET3_CORE_LOCK(sc); + idx = (tag >> 5) & 0x7F; + bit = tag & 0x1F; /* Update our private VLAN bitvector. 
*/ if (add) sc->vmx_vlan_filter[idx] |= (1 << bit); else sc->vmx_vlan_filter[idx] &= ~(1 << bit); - - if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { - if (add) - sc->vmx_ds->vlan_filter[idx] |= (1 << bit); - else - sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit); - vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER); - } - - VMXNET3_CORE_UNLOCK(sc); } static void -vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) +vmxnet3_vlan_register(if_ctx_t ctx, uint16_t tag) { - if (ifp->if_softc == arg) - vmxnet3_update_vlan_filter(arg, 1, tag); + vmxnet3_update_vlan_filter(iflib_get_softc(ctx), 1, tag); } static void -vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) +vmxnet3_vlan_unregister(if_ctx_t ctx, uint16_t tag) { - if (ifp->if_softc == arg) - vmxnet3_update_vlan_filter(arg, 0, tag); + vmxnet3_update_vlan_filter(iflib_get_softc(ctx), 0, tag); } static void -vmxnet3_set_rxfilter(struct vmxnet3_softc *sc) +vmxnet3_set_rxfilter(struct vmxnet3_softc *sc, int flags) { struct ifnet *ifp; struct vmxnet3_driver_shared *ds; @@ -3165,9 +2006,9 @@ vmxnet3_set_rxfilter(struct vmxnet3_softc *sc) ds = sc->vmx_ds; mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST; - if (ifp->if_flags & IFF_PROMISC) + if (flags & IFF_PROMISC) mode |= VMXNET3_RXMODE_PROMISC; - if (ifp->if_flags & IFF_ALLMULTI) + if (flags & IFF_ALLMULTI) mode |= VMXNET3_RXMODE_ALLMULTI; else { int cnt = 0, overflow = 0; @@ -3201,175 +2042,6 @@ vmxnet3_set_rxfilter(struct vmxnet3_softc *sc) vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE); } -static int -vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu) -{ - struct ifnet *ifp; - - ifp = sc->vmx_ifp; - - if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU) - return (EINVAL); - - ifp->if_mtu = mtu; - - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - vmxnet3_init_locked(sc); - } - - return (0); -} - -static int -vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) -{ - struct vmxnet3_softc *sc; - struct ifreq *ifr; - int reinit, mask, error; - - sc = ifp->if_softc; - ifr = (struct ifreq *) data; - error = 0; - - switch (cmd) { - case SIOCSIFMTU: - if (ifp->if_mtu != ifr->ifr_mtu) { - VMXNET3_CORE_LOCK(sc); - error = vmxnet3_change_mtu(sc, ifr->ifr_mtu); - VMXNET3_CORE_UNLOCK(sc); - } - break; - - case SIOCSIFFLAGS: - VMXNET3_CORE_LOCK(sc); - if (ifp->if_flags & IFF_UP) { - if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { - if ((ifp->if_flags ^ sc->vmx_if_flags) & - (IFF_PROMISC | IFF_ALLMULTI)) { - vmxnet3_set_rxfilter(sc); - } - } else - vmxnet3_init_locked(sc); - } else { - if (ifp->if_drv_flags & IFF_DRV_RUNNING) - vmxnet3_stop(sc); - } - sc->vmx_if_flags = ifp->if_flags; - VMXNET3_CORE_UNLOCK(sc); - break; - - case SIOCADDMULTI: - case SIOCDELMULTI: - VMXNET3_CORE_LOCK(sc); - if (ifp->if_drv_flags & IFF_DRV_RUNNING) - vmxnet3_set_rxfilter(sc); - VMXNET3_CORE_UNLOCK(sc); - break; - - case SIOCSIFMEDIA: - case SIOCGIFMEDIA: - error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd); - break; - - case SIOCSIFCAP: - VMXNET3_CORE_LOCK(sc); - mask = ifr->ifr_reqcap ^ ifp->if_capenable; - - if (mask & IFCAP_TXCSUM) - ifp->if_capenable ^= IFCAP_TXCSUM; - if (mask & IFCAP_TXCSUM_IPV6) - ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; - if (mask & IFCAP_TSO4) - ifp->if_capenable ^= IFCAP_TSO4; - if (mask & IFCAP_TSO6) - ifp->if_capenable ^= IFCAP_TSO6; - - if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO | - IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)) { - /* Changing these features requires us to reinit. 
*/ - reinit = 1; - - if (mask & IFCAP_RXCSUM) - ifp->if_capenable ^= IFCAP_RXCSUM; - if (mask & IFCAP_RXCSUM_IPV6) - ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; - if (mask & IFCAP_LRO) - ifp->if_capenable ^= IFCAP_LRO; - if (mask & IFCAP_VLAN_HWTAGGING) - ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; - if (mask & IFCAP_VLAN_HWFILTER) - ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; - } else - reinit = 0; - - if (mask & IFCAP_VLAN_HWTSO) - ifp->if_capenable ^= IFCAP_VLAN_HWTSO; - - if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - vmxnet3_init_locked(sc); - } else { - vmxnet3_init_hwassist(sc); - } - - VMXNET3_CORE_UNLOCK(sc); - VLAN_CAPABILITIES(ifp); - break; - - default: - error = ether_ioctl(ifp, cmd, data); - break; - } - - VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc); - - return (error); -} - -#ifndef VMXNET3_LEGACY_TX -static void -vmxnet3_qflush(struct ifnet *ifp) -{ - struct vmxnet3_softc *sc; - struct vmxnet3_txqueue *txq; - struct mbuf *m; - int i; - - sc = ifp->if_softc; - - for (i = 0; i < sc->vmx_ntxqueues; i++) { - txq = &sc->vmx_txq[i]; - - VMXNET3_TXQ_LOCK(txq); - while ((m = buf_ring_dequeue_sc(txq->vxtxq_br)) != NULL) - m_freem(m); - VMXNET3_TXQ_UNLOCK(txq); - } - - if_qflush(ifp); -} -#endif - -static int -vmxnet3_watchdog(struct vmxnet3_txqueue *txq) -{ - struct vmxnet3_softc *sc; - - sc = txq->vxtxq_sc; - - VMXNET3_TXQ_LOCK(txq); - if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) { - VMXNET3_TXQ_UNLOCK(txq); - return (0); - } - VMXNET3_TXQ_UNLOCK(txq); - - if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n", - txq->vxtxq_id); - return (1); -} - static void vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc) { @@ -3377,134 +2049,35 @@ vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc) vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS); } -static uint64_t -vmxnet3_get_counter(struct ifnet *ifp, ift_counter cnt) -{ - struct vmxnet3_softc *sc; - uint64_t rv; - - sc = if_getsoftc(ifp); - rv = 0; - - /* - * With the exception of if_ierrors, these ifnet statistics are - * only updated in the driver, so just set them to our accumulated - * values. if_ierrors is updated in ether_input() for malformed - * frames that we should have already discarded. 
- */ - switch (cnt) { - case IFCOUNTER_IPACKETS: - for (int i = 0; i < sc->vmx_nrxqueues; i++) - rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ipackets; - return (rv); - case IFCOUNTER_IQDROPS: - for (int i = 0; i < sc->vmx_nrxqueues; i++) - rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_iqdrops; - return (rv); - case IFCOUNTER_IERRORS: - for (int i = 0; i < sc->vmx_nrxqueues; i++) - rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ierrors; - return (rv); - case IFCOUNTER_OPACKETS: - for (int i = 0; i < sc->vmx_ntxqueues; i++) - rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_opackets; - return (rv); -#ifndef VMXNET3_LEGACY_TX - case IFCOUNTER_OBYTES: - for (int i = 0; i < sc->vmx_ntxqueues; i++) - rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_obytes; - return (rv); - case IFCOUNTER_OMCASTS: - for (int i = 0; i < sc->vmx_ntxqueues; i++) - rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_omcasts; - return (rv); -#endif - default: - return (if_get_counter_default(ifp, cnt)); - } -} - -static void -vmxnet3_tick(void *xsc) -{ - struct vmxnet3_softc *sc; - struct ifnet *ifp; - int i, timedout; - - sc = xsc; - ifp = sc->vmx_ifp; - timedout = 0; - - VMXNET3_CORE_LOCK_ASSERT(sc); - - vmxnet3_refresh_host_stats(sc); - - for (i = 0; i < sc->vmx_ntxqueues; i++) - timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]); - - if (timedout != 0) { - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - vmxnet3_init_locked(sc); - } else - callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc); -} - static int vmxnet3_link_is_up(struct vmxnet3_softc *sc) { uint32_t status; - /* Also update the link speed while here. */ status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK); - sc->vmx_link_speed = status >> 16; return !!(status & 0x1); } static void vmxnet3_link_status(struct vmxnet3_softc *sc) { - struct ifnet *ifp; + if_ctx_t ctx; + uint64_t speed; int link; - ifp = sc->vmx_ifp; + ctx = sc->vmx_ctx; link = vmxnet3_link_is_up(sc); - + speed = IF_Gbps(10); + if (link != 0 && sc->vmx_link_active == 0) { sc->vmx_link_active = 1; - if_link_state_change(ifp, LINK_STATE_UP); + iflib_link_state_change(ctx, LINK_STATE_UP, speed); } else if (link == 0 && sc->vmx_link_active != 0) { sc->vmx_link_active = 0; - if_link_state_change(ifp, LINK_STATE_DOWN); + iflib_link_state_change(ctx, LINK_STATE_DOWN, speed); } } -static void -vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) -{ - struct vmxnet3_softc *sc; - - sc = ifp->if_softc; - - ifmr->ifm_status = IFM_AVALID; - ifmr->ifm_active = IFM_ETHER; - - VMXNET3_CORE_LOCK(sc); - if (vmxnet3_link_is_up(sc) != 0) { - ifmr->ifm_status |= IFM_ACTIVE; - ifmr->ifm_active |= IFM_AUTO; - } else - ifmr->ifm_active |= IFM_NONE; - VMXNET3_CORE_UNLOCK(sc); -} - -static int -vmxnet3_media_change(struct ifnet *ifp) -{ - - /* Ignore. 
*/ - return (0); -} - static void vmxnet3_set_lladdr(struct vmxnet3_softc *sc) { @@ -3543,11 +2116,9 @@ vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq, { struct sysctl_oid *node, *txsnode; struct sysctl_oid_list *list, *txslist; - struct vmxnet3_txq_stats *stats; struct UPT1_TxStats *txstats; char namebuf[16]; - stats = &txq->vxtxq_stats; txstats = &txq->vxtxq_ts->stats; snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id); @@ -3555,24 +2126,9 @@ vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq, NULL, "Transmit Queue"); txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD, - &stats->vmtxs_opackets, "Transmit packets"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD, - &stats->vmtxs_obytes, "Transmit bytes"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD, - &stats->vmtxs_omcasts, "Transmit multicasts"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, - &stats->vmtxs_csum, "Transmit checksum offloaded"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD, - &stats->vmtxs_tso, "Transmit TCP segmentation offloaded"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD, - &stats->vmtxs_full, "Transmit ring full"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD, - &stats->vmtxs_offload_failed, "Transmit checksum offload failed"); - /* - * Add statistics reported by the host. These are updated once - * per second. + * Add statistics reported by the host. These are updated by the + * iflib txq timer on txq 0. */ txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD, NULL, "Host Statistics"); @@ -3601,11 +2157,9 @@ vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq, { struct sysctl_oid *node, *rxsnode; struct sysctl_oid_list *list, *rxslist; - struct vmxnet3_rxq_stats *stats; struct UPT1_RxStats *rxstats; char namebuf[16]; - stats = &rxq->vxrxq_stats; rxstats = &rxq->vxrxq_rs->stats; snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id); @@ -3613,18 +2167,9 @@ vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq, NULL, "Receive Queue"); rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD, - &stats->vmrxs_ipackets, "Receive packets"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD, - &stats->vmrxs_ibytes, "Receive bytes"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD, - &stats->vmrxs_iqdrops, "Receive drops"); - SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD, - &stats->vmrxs_ierrors, "Receive errors"); - /* - * Add statistics reported by the host. These are updated once - * per second. + * Add statistics reported by the host. These are updated by the + * iflib txq timer on txq 0. 
*/ rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD, NULL, "Host Statistics"); @@ -3655,19 +2200,20 @@ static void vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { + if_softc_ctx_t scctx; struct sysctl_oid *node; struct sysctl_oid_list *list; int i; - for (i = 0; i < sc->vmx_ntxqueues; i++) { + scctx = sc->vmx_scctx; + + for (i = 0; i < scctx->isc_ntxqsets; i++) { struct vmxnet3_txqueue *txq = &sc->vmx_txq[i]; node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO, "debug", CTLFLAG_RD, NULL, ""); list = SYSCTL_CHILDREN(node); - SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD, - &txq->vxtxq_cmd_ring.vxtxr_head, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD, &txq->vxtxq_cmd_ring.vxtxr_next, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD, @@ -3682,27 +2228,21 @@ vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc, &txq->vxtxq_comp_ring.vxcr_gen, 0, ""); } - for (i = 0; i < sc->vmx_nrxqueues; i++) { + for (i = 0; i < scctx->isc_nrxqsets; i++) { struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i]; node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO, "debug", CTLFLAG_RD, NULL, ""); list = SYSCTL_CHILDREN(node); - SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD, - &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, ""); SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, ""); - SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD, - &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, ""); SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, ""); - SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD, - &rxq->vxrxq_comp_ring.vxcr_next, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD, &rxq->vxrxq_comp_ring.vxcr_ndesc, 0,""); SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD, @@ -3714,11 +2254,14 @@ static void vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { + if_softc_ctx_t scctx; int i; - for (i = 0; i < sc->vmx_ntxqueues; i++) + scctx = sc->vmx_scctx; + + for (i = 0; i < scctx->isc_ntxqsets; i++) vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child); - for (i = 0; i < sc->vmx_nrxqueues; i++) + for (i = 0; i < scctx->isc_nrxqsets; i++) vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child); vmxnet3_setup_debug_sysctl(sc, ctx, child); @@ -3728,7 +2271,6 @@ static void vmxnet3_setup_sysctl(struct vmxnet3_softc *sc) { device_t dev; - struct vmxnet3_statistics *stats; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; @@ -3738,26 +2280,6 @@ vmxnet3_setup_sysctl(struct vmxnet3_softc *sc) tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_ntxqueues", CTLFLAG_RD, - &sc->vmx_max_ntxqueues, 0, "Maximum number of Tx queues"); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_nrxqueues", CTLFLAG_RD, - &sc->vmx_max_nrxqueues, 0, "Maximum number of Rx queues"); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD, - &sc->vmx_ntxqueues, 0, "Number of Tx queues"); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD, - &sc->vmx_nrxqueues, 0, "Number of Rx queues"); - - 
stats = &sc->vmx_stats; - SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defragged", CTLFLAG_RD, - &stats->vmst_defragged, 0, "Tx mbuf chains defragged"); - SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defrag_failed", CTLFLAG_RD, - &stats->vmst_defrag_failed, 0, - "Tx mbuf dropped because defrag failed"); - SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD, - &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed"); - SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD, - &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed"); - vmxnet3_setup_queue_sysctl(sc, ctx, child); } @@ -3813,118 +2335,64 @@ vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq) vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1); } -static void -vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc) +static int +vmxnet3_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { + /* Not using interrupts for TX */ + return (0); +} + +static int +vmxnet3_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) +{ + struct vmxnet3_softc *sc; + + sc = iflib_get_softc(ctx); + vmxnet3_enable_intr(sc, sc->vmx_rxq[qid].vxrxq_intr_idx); + return (0); +} + +static void +vmxnet3_link_intr_enable(if_ctx_t ctx) +{ + struct vmxnet3_softc *sc; + + sc = iflib_get_softc(ctx); + vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx); +} + +static void +vmxnet3_intr_enable_all(if_ctx_t ctx) +{ + struct vmxnet3_softc *sc; + if_softc_ctx_t scctx; int i; + sc = iflib_get_softc(ctx); + scctx = sc->vmx_scctx; sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL; - for (i = 0; i < sc->vmx_nintrs; i++) + for (i = 0; i < scctx->isc_vectors; i++) vmxnet3_enable_intr(sc, i); } static void -vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc) +vmxnet3_intr_disable_all(if_ctx_t ctx) { + struct vmxnet3_softc *sc; int i; - sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL; - for (i = 0; i < sc->vmx_nintrs; i++) + sc = iflib_get_softc(ctx); + /* + * iflib may invoke this routine before vmxnet3_attach_post() has + * run, which is before the top level shared data area is + * initialized and the device made aware of it. 
+ */ + if (sc->vmx_ds != NULL) + sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL; + for (i = 0; i < VMXNET3_MAX_INTRS; i++) vmxnet3_disable_intr(sc, i); } -static void -vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) -{ - bus_addr_t *baddr = arg; - - if (error == 0) - *baddr = segs->ds_addr; -} - -static int -vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align, - struct vmxnet3_dma_alloc *dma) -{ - device_t dev; - int error; - - dev = sc->vmx_dev; - bzero(dma, sizeof(struct vmxnet3_dma_alloc)); - - error = bus_dma_tag_create(bus_get_dma_tag(dev), - align, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - size, /* maxsize */ - 1, /* nsegments */ - size, /* maxsegsize */ - BUS_DMA_ALLOCNOW, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &dma->dma_tag); - if (error) { - device_printf(dev, "bus_dma_tag_create failed: %d\n", error); - goto fail; - } - - error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, - BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map); - if (error) { - device_printf(dev, "bus_dmamem_alloc failed: %d\n", error); - goto fail; - } - - error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, - size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT); - if (error) { - device_printf(dev, "bus_dmamap_load failed: %d\n", error); - goto fail; - } - - dma->dma_size = size; - -fail: - if (error) - vmxnet3_dma_free(sc, dma); - - return (error); -} - -static void -vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma) -{ - - if (dma->dma_tag != NULL) { - if (dma->dma_paddr != 0) { - bus_dmamap_sync(dma->dma_tag, dma->dma_map, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(dma->dma_tag, dma->dma_map); - } - - if (dma->dma_vaddr != NULL) { - bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, - dma->dma_map); - } - - bus_dma_tag_destroy(dma->dma_tag); - } - bzero(dma, sizeof(struct vmxnet3_dma_alloc)); -} - -static int -vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def) -{ - char path[64]; - - snprintf(path, sizeof(path), - "hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob); - TUNABLE_INT_FETCH(path, &def); - - return (def); -} - /* * Since this is a purely paravirtualized device, we do not have * to worry about DMA coherency. But at times, we must make sure diff --git a/sys/dev/vmware/vmxnet3/if_vmxvar.h b/sys/dev/vmware/vmxnet3/if_vmxvar.h index 7d7c80cb8865..7e231e8924c3 100644 --- a/sys/dev/vmware/vmxnet3/if_vmxvar.h +++ b/sys/dev/vmware/vmxnet3/if_vmxvar.h @@ -1,6 +1,7 @@ /*- * Copyright (c) 2013 Tsubai Masanari * Copyright (c) 2013 Bryan Venteicher + * Copyright (c) 2018 Patrick Kelsey * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -22,14 +23,6 @@ struct vmxnet3_softc; -struct vmxnet3_dma_alloc { - bus_addr_t dma_paddr; - caddr_t dma_vaddr; - bus_dma_tag_t dma_tag; - bus_dmamap_t dma_map; - bus_size_t dma_size; -}; - /* * The number of Rx/Tx queues this driver prefers. 
*/ @@ -57,153 +50,68 @@ struct vmxnet3_dma_alloc { #define VMXNET3_MAX_RX_NCOMPDESC \ (VMXNET3_MAX_RX_NDESC * VMXNET3_RXRINGS_PERQ) -struct vmxnet3_txbuf { - bus_dmamap_t vtxb_dmamap; - struct mbuf *vtxb_m; -}; - struct vmxnet3_txring { - struct vmxnet3_txbuf *vxtxr_txbuf; - u_int vxtxr_head; u_int vxtxr_next; u_int vxtxr_ndesc; int vxtxr_gen; - bus_dma_tag_t vxtxr_txtag; struct vmxnet3_txdesc *vxtxr_txd; - struct vmxnet3_dma_alloc vxtxr_dma; -}; - -static inline int -VMXNET3_TXRING_AVAIL(struct vmxnet3_txring *txr) -{ - int avail = txr->vxtxr_next - txr->vxtxr_head - 1; - return (avail < 0 ? txr->vxtxr_ndesc + avail : avail); -} - -struct vmxnet3_rxbuf { - bus_dmamap_t vrxb_dmamap; - struct mbuf *vrxb_m; + bus_addr_t vxtxr_paddr; }; struct vmxnet3_rxring { - struct vmxnet3_rxbuf *vxrxr_rxbuf; struct vmxnet3_rxdesc *vxrxr_rxd; - u_int vxrxr_fill; u_int vxrxr_ndesc; int vxrxr_gen; - int vxrxr_rid; - bus_dma_tag_t vxrxr_rxtag; - struct vmxnet3_dma_alloc vxrxr_dma; - bus_dmamap_t vxrxr_spare_dmap; + bus_addr_t vxrxr_paddr; }; -static inline void -vmxnet3_rxr_increment_fill(struct vmxnet3_rxring *rxr) -{ - - if (++rxr->vxrxr_fill == rxr->vxrxr_ndesc) { - rxr->vxrxr_fill = 0; - rxr->vxrxr_gen ^= 1; - } -} - struct vmxnet3_comp_ring { union { struct vmxnet3_txcompdesc *txcd; struct vmxnet3_rxcompdesc *rxcd; } vxcr_u; + /* + * vxcr_next is used on the transmit side to track the next index to + * begin cleaning at. It is not used on the receive side. + */ u_int vxcr_next; u_int vxcr_ndesc; int vxcr_gen; - struct vmxnet3_dma_alloc vxcr_dma; -}; - -struct vmxnet3_txq_stats { - uint64_t vmtxs_opackets; /* if_opackets */ - uint64_t vmtxs_obytes; /* if_obytes */ - uint64_t vmtxs_omcasts; /* if_omcasts */ - uint64_t vmtxs_csum; - uint64_t vmtxs_tso; - uint64_t vmtxs_full; - uint64_t vmtxs_offload_failed; + bus_addr_t vxcr_paddr; }; struct vmxnet3_txqueue { - struct mtx vxtxq_mtx; struct vmxnet3_softc *vxtxq_sc; -#ifndef VMXNET3_LEGACY_TX - struct buf_ring *vxtxq_br; -#endif int vxtxq_id; + int vxtxq_last_flush; int vxtxq_intr_idx; - int vxtxq_watchdog; struct vmxnet3_txring vxtxq_cmd_ring; struct vmxnet3_comp_ring vxtxq_comp_ring; - struct vmxnet3_txq_stats vxtxq_stats; struct vmxnet3_txq_shared *vxtxq_ts; struct sysctl_oid_list *vxtxq_sysctl; -#ifndef VMXNET3_LEGACY_TX - struct task vxtxq_defrtask; -#endif char vxtxq_name[16]; } __aligned(CACHE_LINE_SIZE); -#define VMXNET3_TXQ_LOCK(_txq) mtx_lock(&(_txq)->vxtxq_mtx) -#define VMXNET3_TXQ_TRYLOCK(_txq) mtx_trylock(&(_txq)->vxtxq_mtx) -#define VMXNET3_TXQ_UNLOCK(_txq) mtx_unlock(&(_txq)->vxtxq_mtx) -#define VMXNET3_TXQ_LOCK_ASSERT(_txq) \ - mtx_assert(&(_txq)->vxtxq_mtx, MA_OWNED) -#define VMXNET3_TXQ_LOCK_ASSERT_NOTOWNED(_txq) \ - mtx_assert(&(_txq)->vxtxq_mtx, MA_NOTOWNED) - -struct vmxnet3_rxq_stats { - uint64_t vmrxs_ipackets; /* if_ipackets */ - uint64_t vmrxs_ibytes; /* if_ibytes */ - uint64_t vmrxs_iqdrops; /* if_iqdrops */ - uint64_t vmrxs_ierrors; /* if_ierrors */ -}; - struct vmxnet3_rxqueue { - struct mtx vxrxq_mtx; struct vmxnet3_softc *vxrxq_sc; int vxrxq_id; int vxrxq_intr_idx; - struct mbuf *vxrxq_mhead; - struct mbuf *vxrxq_mtail; + struct if_irq vxrxq_irq; struct vmxnet3_rxring vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ]; struct vmxnet3_comp_ring vxrxq_comp_ring; - struct vmxnet3_rxq_stats vxrxq_stats; struct vmxnet3_rxq_shared *vxrxq_rs; struct sysctl_oid_list *vxrxq_sysctl; char vxrxq_name[16]; } __aligned(CACHE_LINE_SIZE); -#define VMXNET3_RXQ_LOCK(_rxq) mtx_lock(&(_rxq)->vxrxq_mtx) -#define VMXNET3_RXQ_UNLOCK(_rxq) 
mtx_unlock(&(_rxq)->vxrxq_mtx) -#define VMXNET3_RXQ_LOCK_ASSERT(_rxq) \ - mtx_assert(&(_rxq)->vxrxq_mtx, MA_OWNED) -#define VMXNET3_RXQ_LOCK_ASSERT_NOTOWNED(_rxq) \ - mtx_assert(&(_rxq)->vxrxq_mtx, MA_NOTOWNED) - -struct vmxnet3_statistics { - uint32_t vmst_defragged; - uint32_t vmst_defrag_failed; - uint32_t vmst_mgetcl_failed; - uint32_t vmst_mbuf_load_failed; -}; - -struct vmxnet3_interrupt { - struct resource *vmxi_irq; - int vmxi_rid; - void *vmxi_handler; -}; - struct vmxnet3_softc { device_t vmx_dev; + if_ctx_t vmx_ctx; + if_shared_ctx_t vmx_sctx; + if_softc_ctx_t vmx_scctx; struct ifnet *vmx_ifp; struct vmxnet3_driver_shared *vmx_ds; uint32_t vmx_flags; -#define VMXNET3_FLAG_NO_MSIX 0x0001 #define VMXNET3_FLAG_RSS 0x0002 struct vmxnet3_rxqueue *vmx_rxq; @@ -215,56 +123,24 @@ struct vmxnet3_softc { struct resource *vmx_res1; bus_space_tag_t vmx_iot1; bus_space_handle_t vmx_ioh1; - struct resource *vmx_msix_res; int vmx_link_active; - int vmx_link_speed; - int vmx_if_flags; - int vmx_ntxqueues; - int vmx_nrxqueues; - int vmx_ntxdescs; - int vmx_nrxdescs; - int vmx_max_rxsegs; - int vmx_rx_max_chain; - struct vmxnet3_statistics vmx_stats; - - int vmx_intr_type; int vmx_intr_mask_mode; int vmx_event_intr_idx; - int vmx_nintrs; - struct vmxnet3_interrupt vmx_intrs[VMXNET3_MAX_INTRS]; + struct if_irq vmx_event_intr_irq; - struct mtx vmx_mtx; -#ifndef VMXNET3_LEGACY_TX - struct taskqueue *vmx_tq; -#endif uint8_t *vmx_mcast; - void *vmx_qs; struct vmxnet3_rss_shared *vmx_rss; - struct callout vmx_tick; - struct vmxnet3_dma_alloc vmx_ds_dma; - struct vmxnet3_dma_alloc vmx_qs_dma; - struct vmxnet3_dma_alloc vmx_mcast_dma; - struct vmxnet3_dma_alloc vmx_rss_dma; - struct ifmedia vmx_media; - int vmx_max_ntxqueues; - int vmx_max_nrxqueues; - eventhandler_tag vmx_vlan_attach; - eventhandler_tag vmx_vlan_detach; + struct iflib_dma_info vmx_ds_dma; + struct iflib_dma_info vmx_qs_dma; + struct iflib_dma_info vmx_mcast_dma; + struct iflib_dma_info vmx_rss_dma; + struct ifmedia *vmx_media; uint32_t vmx_vlan_filter[4096/32]; uint8_t vmx_lladdr[ETHER_ADDR_LEN]; }; -#define VMXNET3_CORE_LOCK_INIT(_sc, _name) \ - mtx_init(&(_sc)->vmx_mtx, _name, "VMXNET3 Lock", MTX_DEF) -#define VMXNET3_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->vmx_mtx) -#define VMXNET3_CORE_LOCK(_sc) mtx_lock(&(_sc)->vmx_mtx) -#define VMXNET3_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->vmx_mtx) -#define VMXNET3_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->vmx_mtx, MA_OWNED) -#define VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(_sc) \ - mtx_assert(&(_sc)->vmx_mtx, MA_NOTOWNED) - /* * Our driver version we report to the hypervisor; we just keep * this value constant. @@ -275,21 +151,28 @@ struct vmxnet3_softc { * Max descriptors per Tx packet. We must limit the size of the * any TSO packets based on the number of segments. */ -#define VMXNET3_TX_MAXSEGS 32 +#define VMXNET3_TX_MAXSEGS 32 /* 64K @ 2K segment size */ #define VMXNET3_TX_MAXSIZE (VMXNET3_TX_MAXSEGS * MCLBYTES) +#define VMXNET3_TSO_MAXSIZE (VMXNET3_TX_MAXSIZE - ETHER_VLAN_ENCAP_LEN) /* - * Maximum support Tx segments size. The length field in the + * Maximum supported Tx segment size. The length field in the * Tx descriptor is 14 bits. + * + * XXX It's possible a descriptor length field of 0 means 2^14, but this + * isn't confirmed, so limit to 2^14 - 1 for now. */ -#define VMXNET3_TX_MAXSEGSIZE (1 << 14) +#define VMXNET3_TX_MAXSEGSIZE ((1 << 14) - 1) /* - * The maximum number of Rx segments we accept. 
When LRO is enabled, - * this allows us to receive the maximum sized frame with one MCLBYTES - * cluster followed by 16 MJUMPAGESIZE clusters. + * Maximum supported Rx segment size. The length field in the + * Rx descriptor is 14 bits. + * + * The reference drivers skip zero-length descriptors, which seems to be a + * strong indication that on the receive side, a descriptor length field of + * zero does not mean 2^14. */ -#define VMXNET3_MAX_RX_SEGS 17 +#define VMXNET3_RX_MAXSEGSIZE ((1 << 14) - 1) /* * Predetermined size of the multicast MACs filter table. If the @@ -298,17 +181,6 @@ struct vmxnet3_softc { */ #define VMXNET3_MULTICAST_MAX 32 -/* - * Our Tx watchdog timeout. - */ -#define VMXNET3_WATCHDOG_TIMEOUT 5 - -/* - * Number of slots in the Tx bufrings. This value matches most other - * multiqueue drivers. - */ -#define VMXNET3_DEF_BUFRING_SIZE 4096 - /* * IP protocols that we can perform Tx checksum offloading of. */ @@ -318,28 +190,4 @@ struct vmxnet3_softc { #define VMXNET3_CSUM_ALL_OFFLOAD \ (VMXNET3_CSUM_OFFLOAD | VMXNET3_CSUM_OFFLOAD_IPV6 | CSUM_TSO) -/* - * Compat macros to keep this driver compiling on old releases. - */ - -#if !defined(SYSCTL_ADD_UQUAD) -#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD -#endif - -#if !defined(IFCAP_TXCSUM_IPV6) -#define IFCAP_TXCSUM_IPV6 0 -#endif - -#if !defined(IFCAP_RXCSUM_IPV6) -#define IFCAP_RXCSUM_IPV6 0 -#endif - -#if !defined(CSUM_TCP_IPV6) -#define CSUM_TCP_IPV6 0 -#endif - -#if !defined(CSUM_UDP_IPV6) -#define CSUM_UDP_IPV6 0 -#endif - #endif /* _IF_VMXVAR_H */ diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c index 504f3e786da0..33df3efcb254 100644 --- a/sys/fs/nfsserver/nfs_nfsdport.c +++ b/sys/fs/nfsserver/nfs_nfsdport.c @@ -3614,8 +3614,7 @@ nfssvc_srvcall(struct thread *p, struct nfssvc_args *uap, struct ucred *cred) error = EPERM; if (!error) { len = sizeof (struct nfsd_dumpclients) * dumplist.ndl_size; - dumpclients = (struct nfsd_dumpclients *)malloc(len, - M_TEMP, M_WAITOK); + dumpclients = malloc(len, M_TEMP, M_WAITOK | M_ZERO); nfsrv_dumpclients(dumpclients, dumplist.ndl_size); error = copyout(dumpclients, CAST_USER_ADDR_T(dumplist.ndl_list), len); @@ -3633,8 +3632,7 @@ nfssvc_srvcall(struct thread *p, struct nfssvc_args *uap, struct ucred *cred) if (!error) { len = sizeof (struct nfsd_dumplocks) * dumplocklist.ndllck_size; - dumplocks = (struct nfsd_dumplocks *)malloc(len, - M_TEMP, M_WAITOK); + dumplocks = malloc(len, M_TEMP, M_WAITOK | M_ZERO); nfsrv_dumplocks(nd.ni_vp, dumplocks, dumplocklist.ndllck_size, p); vput(nd.ni_vp); diff --git a/sys/fs/smbfs/smbfs_vnops.c b/sys/fs/smbfs/smbfs_vnops.c index 09d21b8c4bb5..456d501d225e 100644 --- a/sys/fs/smbfs/smbfs_vnops.c +++ b/sys/fs/smbfs/smbfs_vnops.c @@ -1120,8 +1120,8 @@ smbfs_advlock(ap) static int smbfs_pathcheck(struct smbmount *smp, const char *name, int nmlen, int nameiop) { - static const char *badchars = "*/:<>;?"; - static const char *badchars83 = " +|,[]="; + static const char *badchars = "*/:<>?"; + static const char *badchars83 = " +|,[]=;"; const char *cp; int i, error; diff --git a/sys/i386/conf/GENERIC b/sys/i386/conf/GENERIC index 46e6a599fb40..fbcc61647ad7 100644 --- a/sys/i386/conf/GENERIC +++ b/sys/i386/conf/GENERIC @@ -280,7 +280,6 @@ device xe # Xircom pccard Ethernet # Wireless NIC cards device wlan # 802.11 support options IEEE80211_DEBUG # enable debug msgs -options IEEE80211_AMPDU_AGE # age frames in AMPDU reorder q's options IEEE80211_SUPPORT_MESH # enable 802.11s draft support device wlan_wep # 802.11 WEP support device 
wlan_ccmp # 802.11 CCMP support diff --git a/sys/mips/conf/ERL b/sys/mips/conf/ERL index 639f34d09b67..d1f23271dbe6 100644 --- a/sys/mips/conf/ERL +++ b/sys/mips/conf/ERL @@ -136,7 +136,6 @@ device octm # Wireless NIC cards device wlan # 802.11 support options IEEE80211_DEBUG # enable debug msgs -options IEEE80211_AMPDU_AGE # age frames in AMPDU reorder q's options IEEE80211_SUPPORT_MESH # enable 802.11s draft support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support diff --git a/sys/mips/conf/OCTEON1 b/sys/mips/conf/OCTEON1 index 068126bf8b80..09d1d395cdd7 100644 --- a/sys/mips/conf/OCTEON1 +++ b/sys/mips/conf/OCTEON1 @@ -169,7 +169,6 @@ device bge # Broadcom BCM570xx Gigabit Ethernet device wlan # 802.11 support options IEEE80211_DEBUG # enable debug msgs -options IEEE80211_AMPDU_AGE # age frames in AMPDU reorder q's options IEEE80211_SUPPORT_MESH # enable 802.11s draft support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support diff --git a/sys/modules/vmware/vmxnet3/Makefile b/sys/modules/vmware/vmxnet3/Makefile index 9debcc6744ce..331c4dd4860a 100644 --- a/sys/modules/vmware/vmxnet3/Makefile +++ b/sys/modules/vmware/vmxnet3/Makefile @@ -27,10 +27,7 @@ KMOD= if_vmx SRCS= if_vmx.c -SRCS+= bus_if.h device_if.h pci_if.h opt_inet.h opt_inet6.h - -# With VMXNET3_LEGACY_TX, the driver will use the non-multiqueue -# capable if_start interface. -#CFLAGS+= -DVMXNET3_LEGACY_TX +SRCS+= bus_if.h device_if.h pci_if.h ifdi_if.h +SRCS+= opt_inet.h opt_inet6.h .include diff --git a/sys/net/if.c b/sys/net/if.c index 44f5e8ca5a04..a6552f80f376 100644 --- a/sys/net/if.c +++ b/sys/net/if.c @@ -168,14 +168,14 @@ struct ifmediareq32 { #define SIOCGIFXMEDIA32 _IOC_NEWTYPE(SIOCGIFXMEDIA, struct ifmediareq32) #define _CASE_IOC_IFGROUPREQ_32(cmd) \ - case _IOC_NEWTYPE((cmd), struct ifgroupreq32): + _IOC_NEWTYPE((cmd), struct ifgroupreq32): case #else /* !COMPAT_FREEBSD32 */ #define _CASE_IOC_IFGROUPREQ_32(cmd) #endif /* !COMPAT_FREEBSD32 */ #define CASE_IOC_IFGROUPREQ(cmd) \ _CASE_IOC_IFGROUPREQ_32(cmd) \ - case (cmd) + (cmd) union ifreq_union { struct ifreq ifr; @@ -2894,7 +2894,7 @@ ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td) error = if_gethwaddr(ifp, ifr); break; - CASE_IOC_IFGROUPREQ(SIOCAIFGROUP): + case CASE_IOC_IFGROUPREQ(SIOCAIFGROUP): error = priv_check(td, PRIV_NET_ADDIFGROUP); if (error) return (error); @@ -2903,12 +2903,12 @@ ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td) return (error); break; - CASE_IOC_IFGROUPREQ(SIOCGIFGROUP): + case CASE_IOC_IFGROUPREQ(SIOCGIFGROUP): if ((error = if_getgroup((struct ifgroupreq *)data, ifp))) return (error); break; - CASE_IOC_IFGROUPREQ(SIOCDIFGROUP): + case CASE_IOC_IFGROUPREQ(SIOCDIFGROUP): error = priv_check(td, PRIV_NET_DELIFGROUP); if (error) return (error); @@ -3063,7 +3063,7 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td) error = if_clone_list((struct if_clonereq *)data); goto out_noref; - CASE_IOC_IFGROUPREQ(SIOCGIFGMEMB): + case CASE_IOC_IFGROUPREQ(SIOCGIFGMEMB): error = if_getgroupmembers((struct ifgroupreq *)data); goto out_noref; diff --git a/sys/net/iflib.c b/sys/net/iflib.c index 00a490555ec3..0e4445a2575a 100644 --- a/sys/net/iflib.c +++ b/sys/net/iflib.c @@ -289,8 +289,6 @@ typedef struct iflib_sw_tx_desc_array { /* magic number that should be high enough for any hardware */ #define IFLIB_MAX_TX_SEGS 128 -/* bnxt supports 64 with hardware LRO enabled */ -#define IFLIB_MAX_RX_SEGS 64 #define IFLIB_RX_COPY_THRESH 128 
#define IFLIB_MAX_RX_REFRESH 32 /* The minimum descriptors per second before we start coalescing */ @@ -1327,16 +1325,13 @@ _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) } int -iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags) +iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags) { int err; - if_shared_ctx_t sctx = ctx->ifc_sctx; device_t dev = ctx->ifc_dev; - KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized")); - - err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ - sctx->isc_q_align, 0, /* alignment, bounds */ + err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ + align, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ @@ -1386,6 +1381,16 @@ iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags) return (err); } +int +iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags) +{ + if_shared_ctx_t sctx = ctx->ifc_sctx; + + KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized")); + + return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags)); +} + int iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count) { @@ -4368,11 +4373,8 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct ctx->ifc_softc = sc; if ((err = iflib_register(ctx)) != 0) { - if (ctx->ifc_flags & IFC_SC_ALLOCATED) - free(sc, M_IFLIB); - free(ctx, M_IFLIB); device_printf(dev, "iflib_register failed %d\n", err); - return (err); + goto fail_ctx_free; } iflib_add_device_sysctl_pre(ctx); @@ -4382,9 +4384,8 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct iflib_reset_qvalues(ctx); CTX_LOCK(ctx); if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { - CTX_UNLOCK(ctx); device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); - return (err); + goto fail_unlock; } _iflib_pre_assert(scctx); ctx->ifc_txrx = *scctx->isc_txrx; @@ -4414,7 +4415,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct /* round down instead? 
*/ device_printf(dev, "# rx descriptors must be a power of 2\n"); err = EINVAL; - goto fail; + goto fail_iflib_detach; } } for (i = 0; i < sctx->isc_ntxqs; i++) { @@ -4422,7 +4423,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct device_printf(dev, "# tx descriptors must be a power of 2"); err = EINVAL; - goto fail; + goto fail_iflib_detach; } } @@ -4492,7 +4493,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct /* Get memory for the station queues */ if ((err = iflib_queues_alloc(ctx))) { device_printf(dev, "Unable to allocate queue memory\n"); - goto fail; + goto fail_intr_free; } if ((err = iflib_qset_structures_setup(ctx))) @@ -4511,7 +4512,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct IFDI_INTR_DISABLE(ctx); if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) { device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err); - goto fail_intr_free; + goto fail_queues; } if (msix <= 1) { rid = 0; @@ -4521,7 +4522,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct } if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) { device_printf(dev, "iflib_legacy_setup failed %d\n", err); - goto fail_intr_free; + goto fail_queues; } } @@ -4557,14 +4558,18 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct fail_detach: ether_ifdetach(ctx->ifc_ifp); fail_intr_free: + iflib_free_intr_mem(ctx); fail_queues: iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); -fail: - iflib_free_intr_mem(ctx); +fail_iflib_detach: IFDI_DETACH(ctx); +fail_unlock: CTX_UNLOCK(ctx); - +fail_ctx_free: + if (ctx->ifc_flags & IFC_SC_ALLOCATED) + free(ctx->ifc_softc, M_IFLIB); + free(ctx, M_IFLIB); return (err); } @@ -4593,9 +4598,7 @@ iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, if ((err = iflib_register(ctx)) != 0) { device_printf(dev, "%s: iflib_register failed %d\n", __func__, err); - free(sc, M_IFLIB); - free(ctx, M_IFLIB); - return (err); + goto fail_ctx_free; } iflib_add_device_sysctl_pre(ctx); @@ -4609,14 +4612,14 @@ iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); - return (err); + goto fail_ctx_free; } if (sctx->isc_flags & IFLIB_GEN_MAC) iflib_gen_mac(ctx); if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name, clctx->cc_params)) != 0) { device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err); - return (err); + goto fail_ctx_free; } ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL); @@ -4674,7 +4677,7 @@ iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, /* round down instead? 
*/ device_printf(dev, "# rx descriptors must be a power of 2\n"); err = EINVAL; - goto fail; + goto fail_iflib_detach; } } for (i = 0; i < sctx->isc_ntxqs; i++) { @@ -4682,7 +4685,7 @@ iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, device_printf(dev, "# tx descriptors must be a power of 2"); err = EINVAL; - goto fail; + goto fail_iflib_detach; } } @@ -4728,7 +4731,7 @@ iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, /* Get memory for the station queues */ if ((err = iflib_queues_alloc(ctx))) { device_printf(dev, "Unable to allocate queue memory\n"); - goto fail; + goto fail_iflib_detach; } if ((err = iflib_qset_structures_setup(ctx))) { @@ -4768,8 +4771,11 @@ iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, fail_queues: iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); -fail: +fail_iflib_detach: IFDI_DETACH(ctx); +fail_ctx_free: + free(ctx->ifc_softc, M_IFLIB); + free(ctx, M_IFLIB); return (err); } diff --git a/sys/net/iflib.h b/sys/net/iflib.h index 8c2be41b3f33..f8524859391c 100644 --- a/sys/net/iflib.h +++ b/sys/net/iflib.h @@ -69,6 +69,9 @@ typedef struct if_rxd_frag { uint16_t irf_len; } *if_rxd_frag_t; +/* bnxt supports 64 with hardware LRO enabled */ +#define IFLIB_MAX_RX_SEGS 64 + typedef struct if_rxd_info { /* set by iflib */ uint16_t iri_qsidx; /* qset index */ @@ -428,6 +431,7 @@ void iflib_iov_intr_deferred(if_ctx_t ctx); void iflib_link_state_change(if_ctx_t ctx, int linkstate, uint64_t baudrate); int iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags); +int iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags); void iflib_dma_free(iflib_dma_info_t dma); int iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count); diff --git a/sys/net80211/ieee80211.c b/sys/net80211/ieee80211.c index 658ccc20ccc4..a0b36f2866bb 100644 --- a/sys/net80211/ieee80211.c +++ b/sys/net80211/ieee80211.c @@ -405,8 +405,10 @@ ieee80211_ifdetach(struct ieee80211com *ic) * The VAP is responsible for setting and clearing * the VIMAGE context. 
*/ - while ((vap = TAILQ_FIRST(&ic->ic_vaps)) != NULL) + while ((vap = TAILQ_FIRST(&ic->ic_vaps)) != NULL) { + ieee80211_com_vdetach(vap); ieee80211_vap_destroy(vap); + } ieee80211_waitfor_parent(ic); ieee80211_sysctl_detach(ic); diff --git a/sys/net80211/ieee80211.h b/sys/net80211/ieee80211.h index db46b8f1ce66..61389169bc47 100644 --- a/sys/net80211/ieee80211.h +++ b/sys/net80211/ieee80211.h @@ -951,9 +951,11 @@ enum { IEEE80211_ELEMID_ERP = 42, IEEE80211_ELEMID_HTCAP = 45, IEEE80211_ELEMID_QOS = 46, + IEEE80211_ELEMID_RESERVED_47 = 47, IEEE80211_ELEMID_RSN = 48, IEEE80211_ELEMID_XRATES = 50, IEEE80211_ELEMID_APCHANREP = 51, + IEEE80211_ELEMID_MOBILITY_DOMAIN = 54, IEEE80211_ELEMID_HTINFO = 61, IEEE80211_ELEMID_SECCHAN_OFFSET = 62, IEEE80211_ELEMID_RRM_ENACAPS = 70, diff --git a/sys/net80211/ieee80211_freebsd.c b/sys/net80211/ieee80211_freebsd.c index b33969a0b119..925872593fa8 100644 --- a/sys/net80211/ieee80211_freebsd.c +++ b/sys/net80211/ieee80211_freebsd.c @@ -307,6 +307,55 @@ ieee80211_sysctl_vdetach(struct ieee80211vap *vap) } } +#define MS(_v, _f) (((_v) & _f##_M) >> _f##_S) +int +ieee80211_com_vincref(struct ieee80211vap *vap) +{ + uint32_t ostate; + + ostate = atomic_fetchadd_32(&vap->iv_com_state, IEEE80211_COM_REF_ADD); + + if (ostate & IEEE80211_COM_DETACHED) { + atomic_subtract_32(&vap->iv_com_state, IEEE80211_COM_REF_ADD); + return (ENETDOWN); + } + + if (MS(ostate, IEEE80211_COM_REF) == IEEE80211_COM_REF_MAX) { + atomic_subtract_32(&vap->iv_com_state, IEEE80211_COM_REF_ADD); + return (EOVERFLOW); + } + + return (0); +} + +void +ieee80211_com_vdecref(struct ieee80211vap *vap) +{ + uint32_t ostate; + + ostate = atomic_fetchadd_32(&vap->iv_com_state, -IEEE80211_COM_REF_ADD); + + KASSERT(MS(ostate, IEEE80211_COM_REF) != 0, + ("com reference counter underflow")); + + (void) ostate; +} + +void +ieee80211_com_vdetach(struct ieee80211vap *vap) +{ + int sleep_time; + + sleep_time = msecs_to_ticks(250); + if (sleep_time == 0) + sleep_time = 1; + + atomic_set_32(&vap->iv_com_state, IEEE80211_COM_DETACHED); + while (MS(atomic_load_32(&vap->iv_com_state), IEEE80211_COM_REF) != 0) + pause("comref", sleep_time); +} +#undef MS + int ieee80211_node_dectestref(struct ieee80211_node *ni) { diff --git a/sys/net80211/ieee80211_freebsd.h b/sys/net80211/ieee80211_freebsd.h index 8395eb008962..a70de1086bf6 100644 --- a/sys/net80211/ieee80211_freebsd.h +++ b/sys/net80211/ieee80211_freebsd.h @@ -224,6 +224,11 @@ typedef struct mtx ieee80211_rt_lock_t; */ #include +struct ieee80211vap; +int ieee80211_com_vincref(struct ieee80211vap *); +void ieee80211_com_vdecref(struct ieee80211vap *); +void ieee80211_com_vdetach(struct ieee80211vap *); + #define ieee80211_node_initref(_ni) \ do { ((_ni)->ni_refcnt = 1); } while (0) #define ieee80211_node_incref(_ni) \ @@ -235,7 +240,6 @@ int ieee80211_node_dectestref(struct ieee80211_node *ni); #define ieee80211_node_refcnt(_ni) (_ni)->ni_refcnt struct ifqueue; -struct ieee80211vap; void ieee80211_drain_ifq(struct ifqueue *); void ieee80211_flush_ifq(struct ifqueue *, struct ieee80211vap *); diff --git a/sys/net80211/ieee80211_ioctl.c b/sys/net80211/ieee80211_ioctl.c index e19f4fcfad38..7241a95bf8df 100644 --- a/sys/net80211/ieee80211_ioctl.c +++ b/sys/net80211/ieee80211_ioctl.c @@ -3480,10 +3480,14 @@ ieee80211_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; - int error = 0, wait = 0; + int error = 0, wait = 0, ic_used; struct ifreq *ifr; struct ifaddr *ifa; /* XXX */ + ic_used 
= (cmd != SIOCSIFMTU && cmd != SIOCG80211STATS); + if (ic_used && (error = ieee80211_com_vincref(vap)) != 0) + return (error); + switch (cmd) { case SIOCSIFFLAGS: IEEE80211_LOCK(ic); @@ -3620,5 +3624,9 @@ ieee80211_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) error = ether_ioctl(ifp, cmd, data); break; } + + if (ic_used) + ieee80211_com_vdecref(vap); + return (error); } diff --git a/sys/net80211/ieee80211_var.h b/sys/net80211/ieee80211_var.h index ee17c806dc34..0839baef885a 100644 --- a/sys/net80211/ieee80211_var.h +++ b/sys/net80211/ieee80211_var.h @@ -400,6 +400,7 @@ struct ieee80211vap { uint32_t iv_caps; /* capabilities */ uint32_t iv_htcaps; /* HT capabilities */ uint32_t iv_htextcaps; /* HT extended capabilities */ + uint32_t iv_com_state; /* com usage / detached flag */ enum ieee80211_opmode iv_opmode; /* operation mode */ enum ieee80211_state iv_state; /* state machine state */ enum ieee80211_state iv_nstate; /* pending state */ @@ -685,6 +686,12 @@ MALLOC_DECLARE(M_80211_VAP); #define IEEE80211_VFHT_BITS \ "\20\1VHT\2VHT40\3VHT80\4VHT80P80\5VHT160" +#define IEEE80211_COM_DETACHED 0x00000001 /* ieee80211_ifdetach called */ +#define IEEE80211_COM_REF_ADD 0x00000002 /* add / remove reference */ +#define IEEE80211_COM_REF_M 0xfffffffe /* reference counter bits */ +#define IEEE80211_COM_REF_S 1 +#define IEEE80211_COM_REF_MAX (IEEE80211_COM_REF_M >> IEEE80211_COM_REF_S) + int ic_printf(struct ieee80211com *, const char *, ...) __printflike(2, 3); void ieee80211_ifattach(struct ieee80211com *); void ieee80211_ifdetach(struct ieee80211com *); diff --git a/sys/net80211/ieee80211_wps.h b/sys/net80211/ieee80211_wps.h new file mode 100644 index 000000000000..32cc667e9011 --- /dev/null +++ b/sys/net80211/ieee80211_wps.h @@ -0,0 +1,149 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2017 J.R. Oldroyd, Open Advisors Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ +#ifndef _NET80211_IEEE80211_WPS_H_ +#define _NET80211_IEEE80211_WPS_H_ + +/* + * 802.11 WPS implementation definitions. 
+ */ + +#define IEEE80211_WPS_ATTR_AP_CHANNEL 0x1001 +#define IEEE80211_WPS_ATTR_ASSOC_STATE 0x1002 +#define IEEE80211_WPS_ATTR_AUTH_TYPE 0x1003 +#define IEEE80211_WPS_ATTR_AUTH_TYPE_FLAGS 0x1004 +#define IEEE80211_WPS_ATTR_AUTHENTICATOR 0x1005 +#define IEEE80211_WPS_ATTR_CONFIG_METHODS 0x1008 +#define IEEE80211_WPS_ATTR_CONFIG_ERROR 0x1009 +#define IEEE80211_WPS_ATTR_CONFIRM_URL4 0x100a +#define IEEE80211_WPS_ATTR_CONFIRM_URL6 0x100b +#define IEEE80211_WPS_ATTR_CONN_TYPE 0x100c +#define IEEE80211_WPS_ATTR_CONN_TYPE_FLAGS 0x100d +#define IEEE80211_WPS_ATTR_CRED 0x100e +#define IEEE80211_WPS_ATTR_ENCR_TYPE 0x100f +#define IEEE80211_WPS_ATTR_ENCR_TYPE_FLAGS 0x1010 +#define IEEE80211_WPS_ATTR_DEV_NAME 0x1011 +#define IEEE80211_WPS_ATTR_DEV_PASSWORD_ID 0x1012 +#define IEEE80211_WPS_ATTR_E_HASH1 0x1014 +#define IEEE80211_WPS_ATTR_E_HASH2 0x1015 +#define IEEE80211_WPS_ATTR_E_SNONCE1 0x1016 +#define IEEE80211_WPS_ATTR_E_SNONCE2 0x1017 +#define IEEE80211_WPS_ATTR_ENCR_SETTINGS 0x1018 +#define IEEE80211_WPS_ATTR_ENROLLEE_NONCE 0x101a +#define IEEE80211_WPS_ATTR_FEATURE_ID 0x101b +#define IEEE80211_WPS_ATTR_IDENTITY 0x101c +#define IEEE80211_WPS_ATTR_IDENTITY_PROOF 0x101d +#define IEEE80211_WPS_ATTR_KEY_WRAP_AUTH 0x101e +#define IEEE80211_WPS_ATTR_KEY_ID 0x101f +#define IEEE80211_WPS_ATTR_MAC_ADDR 0x1020 +#define IEEE80211_WPS_ATTR_MANUFACTURER 0x1021 +#define IEEE80211_WPS_ATTR_MSG_TYPE 0x1022 +#define IEEE80211_WPS_ATTR_MODEL_NAME 0x1023 +#define IEEE80211_WPS_ATTR_MODEL_NUMBER 0x1024 +#define IEEE80211_WPS_ATTR_NETWORK_INDEX 0x1026 +#define IEEE80211_WPS_ATTR_NETWORK_KEY 0x1027 +#define IEEE80211_WPS_ATTR_NETWORK_KEY_INDEX 0x1028 +#define IEEE80211_WPS_ATTR_NEW_DEVICE_NAME 0x1029 +#define IEEE80211_WPS_ATTR_NEW_PASSWORD 0x102a +#define IEEE80211_WPS_ATTR_OOB_DEVICE_PASSWORD 0x102c +#define IEEE80211_WPS_ATTR_OS_VERSION 0x102d +#define IEEE80211_WPS_ATTR_POWER_LEVEL 0x102f +#define IEEE80211_WPS_ATTR_PSK_CURRENT 0x1030 +#define IEEE80211_WPS_ATTR_PSK_MAX 0x1031 +#define IEEE80211_WPS_ATTR_PUBLIC_KEY 0x1032 +#define IEEE80211_WPS_ATTR_RADIO_ENABLE 0x1033 +#define IEEE80211_WPS_ATTR_REBOOT 0x1034 +#define IEEE80211_WPS_ATTR_REGISTRAR_CURRENT 0x1035 +#define IEEE80211_WPS_ATTR_REGISTRAR_ESTBLSHD 0x1036 +#define IEEE80211_WPS_ATTR_REGISTRAR_LIST 0x1037 +#define IEEE80211_WPS_ATTR_REGISTRAR_MAX 0x1038 +#define IEEE80211_WPS_ATTR_REGISTRAR_NONCE 0x1039 +#define IEEE80211_WPS_ATTR_REQUEST_TYPE 0x103a +#define IEEE80211_WPS_ATTR_RESPONSE_TYPE 0x103b +#define IEEE80211_WPS_ATTR_RF_BANDS 0x103c +#define IEEE80211_WPS_ATTR_R_HASH1 0x103d +#define IEEE80211_WPS_ATTR_R_HASH2 0x103e +#define IEEE80211_WPS_ATTR_R_SNONCE1 0x103f +#define IEEE80211_WPS_ATTR_R_SNONCE2 0x1040 +#define IEEE80211_WPS_ATTR_SELECTED_REGISTRAR 0x1041 +#define IEEE80211_WPS_ATTR_SERIAL_NUMBER 0x1042 +#define IEEE80211_WPS_ATTR_WPS_STATE 0x1044 +#define IEEE80211_WPS_ATTR_SSID 0x1045 +#define IEEE80211_WPS_ATTR_TOTAL_NETWORKS 0x1046 +#define IEEE80211_WPS_ATTR_UUID_E 0x1047 +#define IEEE80211_WPS_ATTR_UUID_R 0x1048 +#define IEEE80211_WPS_ATTR_VENDOR_EXT 0x1049 +#define IEEE80211_WPS_ATTR_VERSION 0x104a +#define IEEE80211_WPS_ATTR_X509_CERT_REQ 0x104b +#define IEEE80211_WPS_ATTR_X509_CERT 0x104c +#define IEEE80211_WPS_ATTR_EAP_IDENTITY 0x104d +#define IEEE80211_WPS_ATTR_MSG_COUNTER 0x104e +#define IEEE80211_WPS_ATTR_PUBKEY_HASH 0x104f +#define IEEE80211_WPS_ATTR_REKEY_KEY 0x1050 +#define IEEE80211_WPS_ATTR_KEY_LIFETIME 0x1051 +#define IEEE80211_WPS_ATTR_PERMITTED_CONFIG_METHODS 0x1052 +#define 
IEEE80211_WPS_ATTR_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053 +#define IEEE80211_WPS_ATTR_PRIMARY_DEV_TYPE 0x1054 +#define IEEE80211_WPS_ATTR_SECONDARY_DEV_TYPE_LIST 0x1055 +#define IEEE80211_WPS_ATTR_PORTABLE_DEV 0x1056 +#define IEEE80211_WPS_ATTR_AP_SETUP_LOCKED 0x1057 +#define IEEE80211_WPS_ATTR_APPLICATION_EXT 0x1058 +#define IEEE80211_WPS_ATTR_EAP_TYPE 0x1059 +#define IEEE80211_WPS_ATTR_IV 0x1060 +#define IEEE80211_WPS_ATTR_KEY_PROVIDED_AUTO 0x1061 +#define IEEE80211_WPS_ATTR_802_1X_ENABLED 0x1062 +#define IEEE80211_WPS_ATTR_AP_SESSION_KEY 0x1063 +#define IEEE80211_WPS_ATTR_WEP_TRANSMIT_KEY 0x1064 +#define IEEE80211_WPS_ATTR_REQUESTED_DEV_TYPE 0x106a +#define IEEE80211_WPS_ATTR_EXTENSIBILITY_TEST 0x10fa /* _NOT_ defined in the spec */ + +/* RF bands bitmask */ +#define IEEE80211_WPS_RF_BAND_24GHZ 0x01 +#define IEEE80211_WPS_RF_BAND_50GHZ 0x02 +#define IEEE80211_WPS_RF_BAND_600GHZ 0x04 + +/* Config methods bitmask */ +#define IEEE80211_WPS_CONFIG_USBA 0x0001 +#define IEEE80211_WPS_CONFIG_ETHERNET 0x0002 +#define IEEE80211_WPS_CONFIG_LABEL 0x0004 +#define IEEE80211_WPS_CONFIG_DISPLAY 0x0008 +#define IEEE80211_WPS_CONFIG_EXT_NFC_TOKEN 0x0010 +#define IEEE80211_WPS_CONFIG_INT_NFC_TOKEN 0x0020 +#define IEEE80211_WPS_CONFIG_NFC_INTERFACE 0x0040 +#define IEEE80211_WPS_CONFIG_PUSHBUTTON 0x0080 +#define IEEE80211_WPS_CONFIG_KEYPAD 0x0100 +#define IEEE80211_WPS_CONFIG_VIRT_PUSHBUTTON 0x0200 +#define IEEE80211_WPS_CONFIG_PHY_PUSHBUTTON 0x0400 +#define IEEE80211_WPS_CONFIG_P2PS 0x1000 +#define IEEE80211_WPS_CONFIG_VIRT_DISPLAY 0x2000 +#define IEEE80211_WPS_CONFIG_PHY_DISPLAY 0x4000 + +/* Wi-Fi Protected Setup state */ +#define IEEE80211_WPS_STATE_NOT_CONFIGURED 0x01 +#define IEEE80211_WPS_STATE_CONFIGURED 0x02 +#endif /* _NET80211_IEEE80211_WPS_H_ */ diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c index 3bb65ddbb2de..44dc804f81a5 100644 --- a/sys/netpfil/pf/pf_ioctl.c +++ b/sys/netpfil/pf/pf_ioctl.c @@ -3577,14 +3577,18 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td struct pf_src_node *n, *p, *pstore; uint32_t i, nr = 0; + for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; + i++, sh++) { + PF_HASHROW_LOCK(sh); + LIST_FOREACH(n, &sh->nodes, entry) + nr++; + PF_HASHROW_UNLOCK(sh); + } + + psn->psn_len = min(psn->psn_len, + sizeof(struct pf_src_node) * nr); + if (psn->psn_len == 0) { - for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; - i++, sh++) { - PF_HASHROW_LOCK(sh); - LIST_FOREACH(n, &sh->nodes, entry) - nr++; - PF_HASHROW_UNLOCK(sh); - } psn->psn_len = sizeof(struct pf_src_node) * nr; break; } diff --git a/sys/riscv/include/riscvreg.h b/sys/riscv/include/riscvreg.h index 12e4a8fbbefa..ab99ed8a4143 100644 --- a/sys/riscv/include/riscvreg.h +++ b/sys/riscv/include/riscvreg.h @@ -155,7 +155,8 @@ #define SATP_MODE_SV39 (8ULL << SATP_MODE_S) #define SATP_MODE_SV48 (9ULL << SATP_MODE_S) -#define XLEN 8 +#define XLEN __riscv_xlen +#define XLEN_BYTES (XLEN / 8) #define INSN_SIZE 4 #define INSN_C_SIZE 2 diff --git a/sys/riscv/riscv/copyinout.S b/sys/riscv/riscv/copyinout.S index 5e80f4900c8b..e0e53f2029a9 100644 --- a/sys/riscv/riscv/copyinout.S +++ b/sys/riscv/riscv/copyinout.S @@ -1,5 +1,6 @@ /*- * Copyright (c) 2015-2018 Ruslan Bukin + * Copyright (c) 2019 Mitchell Horne * All rights reserved. 
* * Portions of this software were developed by SRI International and the @@ -51,61 +52,95 @@ copyio_fault_nopcb: ret END(copyio_fault) +/* + * copycommon - common copy routine + * + * a0 - Source address + * a1 - Destination address + * a2 - Size of copy + */ + .macro copycommon + la a6, copyio_fault /* Get the handler address */ + SET_FAULT_HANDLER(a6, a7) /* Set the handler */ + ENTER_USER_ACCESS(a7) + + li t2, XLEN_BYTES + blt a2, t2, 3f /* Byte-copy if len < XLEN_BYTES */ + + /* + * Compare lower bits of src and dest. + * If they are aligned with each other, we can do word copy. + */ + andi t0, a0, (XLEN_BYTES-1) /* Low bits of src */ + andi t1, a1, (XLEN_BYTES-1) /* Low bits of dest */ + bne t0, t1, 3f /* Misaligned. Go to byte copy */ + beqz t0, 2f /* Already word-aligned, skip ahead */ + + /* Byte copy until the first word-aligned address */ +1: lb a4, 0(a0) /* Load byte from src */ + addi a0, a0, 1 + sb a4, 0(a1) /* Store byte in dest */ + addi a1, a1, 1 + addi a2, a2, -1 /* len-- */ + andi t0, a0, (XLEN_BYTES-1) + bnez t0, 1b + + /* Copy words */ +2: ld a4, 0(a0) /* Load word from src */ + addi a0, a0, XLEN_BYTES + sd a4, 0(a1) /* Store word in dest */ + addi a1, a1, XLEN_BYTES + addi a2, a2, -XLEN_BYTES /* len -= XLEN_BYTES */ + bgeu a2, t2, 2b /* Again if len >= XLEN_BYTES */ + + /* Check if we're finished */ + beqz a2, 4f + + /* Copy any remaining bytes */ +3: lb a4, 0(a0) /* Load byte from src */ + addi a0, a0, 1 + sb a4, 0(a1) /* Store byte in dest */ + addi a1, a1, 1 + addi a2, a2, -1 /* len-- */ + bnez a2, 3b + +4: EXIT_USER_ACCESS(a7) + SET_FAULT_HANDLER(x0, a7) /* Clear the handler */ + .endm + /* * Copies from a kernel to user address * * int copyout(const void *kaddr, void *udaddr, size_t len) */ ENTRY(copyout) - beqz a2, 2f /* If len == 0 then skip loop */ + beqz a2, copyout_end /* If len == 0 then skip loop */ add a3, a1, a2 li a4, VM_MAXUSER_ADDRESS bgt a3, a4, copyio_fault_nopcb - la a6, copyio_fault /* Get the handler address */ - SET_FAULT_HANDLER(a6, a7) /* Set the handler */ - ENTER_USER_ACCESS(a7) + copycommon -1: lb a4, 0(a0) /* Load from kaddr */ - addi a0, a0, 1 - sb a4, 0(a1) /* Store in uaddr */ - addi a1, a1, 1 - addi a2, a2, -1 /* len-- */ - bnez a2, 1b - - EXIT_USER_ACCESS(a7) - SET_FAULT_HANDLER(x0, a7) /* Clear the handler */ - -2: li a0, 0 /* return 0 */ +copyout_end: + li a0, 0 /* return 0 */ ret END(copyout) /* * Copies from a user to kernel address * - * int copyin(const void *uaddr, void *kdaddr, size_t len) + * int copyin(const void *uaddr, void *kaddr, size_t len) */ ENTRY(copyin) - beqz a2, 2f /* If len == 0 then skip loop */ + beqz a2, copyin_end /* If len == 0 then skip loop */ add a3, a0, a2 li a4, VM_MAXUSER_ADDRESS bgt a3, a4, copyio_fault_nopcb - la a6, copyio_fault /* Get the handler address */ - SET_FAULT_HANDLER(a6, a7) /* Set the handler */ - ENTER_USER_ACCESS(a7) + copycommon -1: lb a4, 0(a0) /* Load from uaddr */ - addi a0, a0, 1 - sb a4, 0(a1) /* Store in kaddr */ - addi a1, a1, 1 - addi a2, a2, -1 /* len-- */ - bnez a2, 1b - - EXIT_USER_ACCESS(a7) - SET_FAULT_HANDLER(x0, a7) /* Clear the handler */ - -2: li a0, 0 /* return 0 */ +copyin_end: + li a0, 0 /* return 0 */ ret END(copyin) diff --git a/sys/sparc64/conf/GENERIC b/sys/sparc64/conf/GENERIC index c1adc072c5a7..cc905bf2a54a 100644 --- a/sys/sparc64/conf/GENERIC +++ b/sys/sparc64/conf/GENERIC @@ -217,7 +217,6 @@ device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'') # Wireless NIC cards device wlan # 802.11 support options IEEE80211_DEBUG # enable debug msgs -options 
IEEE80211_AMPDU_AGE # age frames in AMPDU reorder q's options IEEE80211_SUPPORT_MESH # enable 802.11s D3.0 support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support diff --git a/sys/sys/param.h b/sys/sys/param.h index b8656ed32886..291e54da8b7a 100644 --- a/sys/sys/param.h +++ b/sys/sys/param.h @@ -60,7 +60,7 @@ * in the range 5 to 9. */ #undef __FreeBSD_version -#define __FreeBSD_version 1300008 /* Master, propagated to newvers */ +#define __FreeBSD_version 1300009 /* Master, propagated to newvers */ /* * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD, diff --git a/sys/sys/rmlock.h b/sys/sys/rmlock.h index da5d7274e780..cdcb6edc3b92 100644 --- a/sys/sys/rmlock.h +++ b/sys/sys/rmlock.h @@ -54,7 +54,6 @@ void rm_init_flags(struct rmlock *rm, const char *name, int opts); void rm_destroy(struct rmlock *rm); int rm_wowned(const struct rmlock *rm); void rm_sysinit(void *arg); -void rm_sysinit_flags(void *arg); void _rm_wlock_debug(struct rmlock *rm, const char *file, int line); void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line); diff --git a/sys/sys/rwlock.h b/sys/sys/rwlock.h index 43bbc68cf131..58d5881eb767 100644 --- a/sys/sys/rwlock.h +++ b/sys/sys/rwlock.h @@ -130,7 +130,6 @@ void _rw_init_flags(volatile uintptr_t *c, const char *name, int opts); void _rw_destroy(volatile uintptr_t *c); void rw_sysinit(void *arg); -void rw_sysinit_flags(void *arg); int _rw_wowned(const volatile uintptr_t *c); void _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line); int __rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF); diff --git a/tests/sys/netpfil/pf/ioctl/validation.c b/tests/sys/netpfil/pf/ioctl/validation.c index 9a4eb4fec779..1f713138a47d 100644 --- a/tests/sys/netpfil/pf/ioctl/validation.c +++ b/tests/sys/netpfil/pf/ioctl/validation.c @@ -753,6 +753,38 @@ ATF_TC_CLEANUP(commit, tc) COMMON_CLEANUP(); } +ATF_TC_WITH_CLEANUP(getsrcnodes); +ATF_TC_HEAD(getsrcnodes, tc) +{ + atf_tc_set_md_var(tc, "require.user", "root"); +} + +ATF_TC_BODY(getsrcnodes, tc) +{ + struct pfioc_src_nodes psn; + + COMMON_HEAD(); + + bzero(&psn, sizeof(psn)); + + psn.psn_len = -1; + if (ioctl(dev, DIOCGETSRCNODES, &psn) != 0) + atf_tc_fail("request with size -1 failed"); + + psn.psn_len = 1 << 30; + if (ioctl(dev, DIOCGETSRCNODES, &psn) != 0) + atf_tc_fail("request with size 1 << 30 failed"); + + psn.psn_len = 1 << 31; + if (ioctl(dev, DIOCGETSRCNODES, &psn) != 0) + atf_tc_fail("request with size 1 << 31 failed"); +} + +ATF_TC_CLEANUP(getsrcnodes, tc) +{ + COMMON_CLEANUP(); +} + ATF_TP_ADD_TCS(tp) { ATF_TP_ADD_TC(tp, addtables); @@ -772,6 +804,7 @@ ATF_TP_ADD_TCS(tp) ATF_TP_ADD_TC(tp, cxbegin); ATF_TP_ADD_TC(tp, cxrollback); ATF_TP_ADD_TC(tp, commit); + ATF_TP_ADD_TC(tp, getsrcnodes); return (atf_no_error()); } diff --git a/tools/build/mk/OptionalObsoleteFiles.inc b/tools/build/mk/OptionalObsoleteFiles.inc index a123f797a627..918be325cbbf 100644 --- a/tools/build/mk/OptionalObsoleteFiles.inc +++ b/tools/build/mk/OptionalObsoleteFiles.inc @@ -7465,6 +7465,8 @@ OLD_FILES+=usr/share/man/man8/pppctl.8.gz .endif .if ${MK_PROFILE} == no +OLD_FILES+=usr/lib/lib80211_p.a +OLD_FILES+=usr/lib/libBlocksRuntime_p.a OLD_FILES+=usr/lib/libalias_cuseeme_p.a OLD_FILES+=usr/lib/libalias_dummy_p.a OLD_FILES+=usr/lib/libalias_ftp_p.a @@ -7476,40 +7478,64 @@ OLD_FILES+=usr/lib/libalias_skinny_p.a OLD_FILES+=usr/lib/libalias_smedia_p.a OLD_FILES+=usr/lib/libarchive_p.a OLD_FILES+=usr/lib/libasn1_p.a +OLD_FILES+=usr/lib/libauditd_p.a
+OLD_FILES+=usr/lib/libavl_p.a +OLD_FILES+=usr/lib/libbe_p.a OLD_FILES+=usr/lib/libbegemot_p.a +OLD_FILES+=usr/lib/libblacklist_p.a OLD_FILES+=usr/lib/libbluetooth_p.a OLD_FILES+=usr/lib/libbsdxml_p.a OLD_FILES+=usr/lib/libbsm_p.a OLD_FILES+=usr/lib/libbsnmp_p.a OLD_FILES+=usr/lib/libbz2_p.a +OLD_FILES+=usr/lib/libc++_p.a OLD_FILES+=usr/lib/libc_p.a OLD_FILES+=usr/lib/libcalendar_p.a OLD_FILES+=usr/lib/libcam_p.a OLD_FILES+=usr/lib/libcom_err_p.a OLD_FILES+=usr/lib/libcompat_p.a +OLD_FILES+=usr/lib/libcompiler_rt_p.a OLD_FILES+=usr/lib/libcrypt_p.a OLD_FILES+=usr/lib/libcrypto_p.a +OLD_FILES+=usr/lib/libctf_p.a OLD_FILES+=usr/lib/libcurses_p.a OLD_FILES+=usr/lib/libcursesw_p.a +OLD_FILES+=usr/lib/libcuse_p.a +OLD_FILES+=usr/lib/libcxxrt_p.a +OLD_FILES+=usr/lib/libdevctl_p.a OLD_FILES+=usr/lib/libdevinfo_p.a OLD_FILES+=usr/lib/libdevstat_p.a OLD_FILES+=usr/lib/libdialog_p.a +OLD_FILES+=usr/lib/libdl_p.a +OLD_FILES+=usr/lib/libdpv_p.a +OLD_FILES+=usr/lib/libdtrace_p.a +OLD_FILES+=usr/lib/libdwarf_p.a OLD_FILES+=usr/lib/libedit_p.a +OLD_FILES+=usr/lib/libefivar_p.a OLD_FILES+=usr/lib/libelf_p.a +OLD_FILES+=usr/lib/libexecinfo_p.a OLD_FILES+=usr/lib/libfetch_p.a +OLD_FILES+=usr/lib/libfigpar_p.a OLD_FILES+=usr/lib/libfl_p.a OLD_FILES+=usr/lib/libform_p.a OLD_FILES+=usr/lib/libformw_p.a +OLD_FILES+=usr/lib/libgcc_eh_p.a OLD_FILES+=usr/lib/libgcc_p.a OLD_FILES+=usr/lib/libgeom_p.a OLD_FILES+=usr/lib/libgnuregex_p.a +OLD_FILES+=usr/lib/libgpio_p.a OLD_FILES+=usr/lib/libgssapi_krb5_p.a +OLD_FILES+=usr/lib/libgssapi_ntlm_p.a OLD_FILES+=usr/lib/libgssapi_p.a +OLD_FILES+=usr/lib/libgssapi_spnego_p.a OLD_FILES+=usr/lib/libhdb_p.a OLD_FILES+=usr/lib/libheimbase_p.a +OLD_FILES+=usr/lib/libheimntlm_p.a OLD_FILES+=usr/lib/libheimsqlite_p.a OLD_FILES+=usr/lib/libhistory_p.a +OLD_FILES+=usr/lib/libhx509_p.a OLD_FILES+=usr/lib/libipsec_p.a +OLD_FILES+=usr/lib/libipt_p.a OLD_FILES+=usr/lib/libjail_p.a OLD_FILES+=usr/lib/libkadm5clnt_p.a OLD_FILES+=usr/lib/libkadm5srv_p.a @@ -7520,6 +7546,7 @@ OLD_FILES+=usr/lib/libkrb5_p.a OLD_FILES+=usr/lib/libkvm_p.a OLD_FILES+=usr/lib/libl_p.a OLD_FILES+=usr/lib/libln_p.a +OLD_FILES+=usr/lib/liblzma_p.a OLD_FILES+=usr/lib/libm_p.a OLD_FILES+=usr/lib/libmagic_p.a OLD_FILES+=usr/lib/libmd_p.a @@ -7528,26 +7555,52 @@ OLD_FILES+=usr/lib/libmenu_p.a OLD_FILES+=usr/lib/libmenuw_p.a OLD_FILES+=usr/lib/libmilter_p.a OLD_FILES+=usr/lib/libmp_p.a +OLD_FILES+=usr/lib/libmt_p.a OLD_FILES+=usr/lib/libncurses_p.a OLD_FILES+=usr/lib/libncursesw_p.a OLD_FILES+=usr/lib/libnetgraph_p.a OLD_FILES+=usr/lib/libngatm_p.a +OLD_FILES+=usr/lib/libnv_p.a +OLD_FILES+=usr/lib/libnvpair_p.a +OLD_FILES+=usr/lib/libopencsd_p.a OLD_FILES+=usr/lib/libopie_p.a OLD_FILES+=usr/lib/libpanel_p.a OLD_FILES+=usr/lib/libpanelw_p.a +OLD_FILES+=usr/lib/libpathconv_p.a OLD_FILES+=usr/lib/libpcap_p.a +OLD_FILES+=usr/lib/libpjdlog_p.a OLD_FILES+=usr/lib/libpmc_p.a +OLD_FILES+=usr/lib/libprivatebsdstat_p.a +OLD_FILES+=usr/lib/libprivatedevdctl_p.a +OLD_FILES+=usr/lib/libprivateevent_p.a +OLD_FILES+=usr/lib/libprivateheimipcc_p.a +OLD_FILES+=usr/lib/libprivateheimipcs_p.a +OLD_FILES+=usr/lib/libprivateifconfig_p.a +OLD_FILES+=usr/lib/libprivateldns_p.a +OLD_FILES+=usr/lib/libprivatesqlite3_p.a +OLD_FILES+=usr/lib/libprivatessh_p.a +OLD_FILES+=usr/lib/libprivateucl_p.a +OLD_FILES+=usr/lib/libprivateunbound_p.a +OLD_FILES+=usr/lib/libprivatezstd_p.a +OLD_FILES+=usr/lib/libproc_p.a +OLD_FILES+=usr/lib/libprocstat_p.a OLD_FILES+=usr/lib/libpthread_p.a OLD_FILES+=usr/lib/libradius_p.a +OLD_FILES+=usr/lib/libregex_p.a 
OLD_FILES+=usr/lib/libroken_p.a OLD_FILES+=usr/lib/librpcsvc_p.a +OLD_FILES+=usr/lib/librss_p.a OLD_FILES+=usr/lib/librt_p.a +OLD_FILES+=usr/lib/librtld_db_p.a OLD_FILES+=usr/lib/libsbuf_p.a OLD_FILES+=usr/lib/libsdp_p.a OLD_FILES+=usr/lib/libsmb_p.a OLD_FILES+=usr/lib/libssl_p.a +OLD_FILES+=usr/lib/libstdbuf_p.a OLD_FILES+=usr/lib/libstdc++_p.a +OLD_FILES+=usr/lib/libstdthreads_p.a OLD_FILES+=usr/lib/libsupc++_p.a +OLD_FILES+=usr/lib/libsysdecode_p.a OLD_FILES+=usr/lib/libtacplus_p.a OLD_FILES+=usr/lib/libtermcap_p.a OLD_FILES+=usr/lib/libtermcapw_p.a @@ -7559,14 +7612,23 @@ OLD_FILES+=usr/lib/libtinfo_p.a OLD_FILES+=usr/lib/libtinfow_p.a OLD_FILES+=usr/lib/libufs_p.a OLD_FILES+=usr/lib/libugidfw_p.a +OLD_FILES+=usr/lib/libulog_p.a +OLD_FILES+=usr/lib/libumem_p.a +OLD_FILES+=usr/lib/libusb_p.a OLD_FILES+=usr/lib/libusbhid_p.a +OLD_FILES+=usr/lib/libutempter_p.a OLD_FILES+=usr/lib/libutil_p.a +OLD_FILES+=usr/lib/libuutil_p.a OLD_FILES+=usr/lib/libvgl_p.a +OLD_FILES+=usr/lib/libvmmapi_p.a OLD_FILES+=usr/lib/libwind_p.a OLD_FILES+=usr/lib/libwrap_p.a +OLD_FILES+=usr/lib/libxo_p.a OLD_FILES+=usr/lib/liby_p.a OLD_FILES+=usr/lib/libypclnt_p.a OLD_FILES+=usr/lib/libz_p.a +OLD_FILES+=usr/lib/libzfs_core_p.a +OLD_FILES+=usr/lib/libzfs_p.a OLD_FILES+=usr/lib/private/libldns_p.a OLD_FILES+=usr/lib/private/libssh_p.a .endif diff --git a/tools/tools/nanobsd/pcengines/ALIX_DSK b/tools/tools/nanobsd/pcengines/ALIX_DSK index b46f3ee37569..7635a8c80b93 100644 --- a/tools/tools/nanobsd/pcengines/ALIX_DSK +++ b/tools/tools/nanobsd/pcengines/ALIX_DSK @@ -54,7 +54,6 @@ device miibus device vr device wlan options IEEE80211_DEBUG -options IEEE80211_AMPDU_AGE options IEEE80211_SUPPORT_MESH device wlan_wep device wlan_ccmp diff --git a/usr.bin/cmp/cmp.c b/usr.bin/cmp/cmp.c index dcf32a98dcc7..b8d5dba14ec4 100644 --- a/usr.bin/cmp/cmp.c +++ b/usr.bin/cmp/cmp.c @@ -116,16 +116,14 @@ main(int argc, char *argv[]) if (argc < 2 || argc > 4) usage(); - if (caph_limit_stdio() == -1) - err(ERR_EXIT, "failed to limit stdio"); - /* Backward compatibility -- handle "-" meaning stdin. 
*/ special = 0; if (strcmp(file1 = argv[0], "-") == 0) { special = 1; - fd1 = STDIN_FILENO; + fd1 = 0; file1 = "stdin"; - } else if ((fd1 = open(file1, oflag, 0)) < 0 && errno != EMLINK) { + } + else if ((fd1 = open(file1, oflag, 0)) < 0 && errno != EMLINK) { if (!sflag) err(ERR_EXIT, "%s", file1); else @@ -136,9 +134,10 @@ main(int argc, char *argv[]) errx(ERR_EXIT, "standard input may only be specified once"); special = 1; - fd2 = STDIN_FILENO; + fd2 = 0; file2 = "stdin"; - } else if ((fd2 = open(file2, oflag, 0)) < 0 && errno != EMLINK) { + } + else if ((fd2 = open(file2, oflag, 0)) < 0 && errno != EMLINK) { if (!sflag) err(ERR_EXIT, "%s", file2); else @@ -176,6 +175,16 @@ main(int argc, char *argv[]) if (caph_fcntls_limit(fd2, fcntls) < 0) err(ERR_EXIT, "unable to limit fcntls for %s", file2); + if (!special) { + cap_rights_init(&rights); + if (caph_rights_limit(STDIN_FILENO, &rights) < 0) { + err(ERR_EXIT, "unable to limit stdio"); + } + } + + if (caph_limit_stdout() == -1 || caph_limit_stderr() == -1) + err(ERR_EXIT, "unable to limit stdio"); + caph_cache_catpages(); if (caph_enter() < 0) diff --git a/usr.bin/cmp/tests/cmp_test2.sh b/usr.bin/cmp/tests/cmp_test2.sh index 2221e623888a..938e7f499ee5 100755 --- a/usr.bin/cmp/tests/cmp_test2.sh +++ b/usr.bin/cmp/tests/cmp_test2.sh @@ -31,11 +31,10 @@ special_head() { special_body() { echo 0123456789abcdef > a echo 0123456789abcdeg > b - cat a | atf_check -s exit:0 cmp a - - cat a | atf_check -s exit:0 cmp - a - cat b | atf_check -s not-exit:0 cmp a - - cat b | atf_check -s not-exit:0 cmp - a - true + atf_check -s exit:0 -o empty -e empty -x "cat a | cmp a -" + atf_check -s exit:0 -o empty -e empty -x "cat a | cmp - a" + atf_check -s exit:1 -o not-empty -e empty -x "cat b | cmp a -" + atf_check -s exit:1 -o not-empty -e empty -x "cat b | cmp - a" } atf_test_case symlink diff --git a/usr.bin/systat/devs.c b/usr.bin/systat/devs.c index e6b1e48eb10e..c802a843eb2c 100644 --- a/usr.bin/systat/devs.c +++ b/usr.bin/systat/devs.c @@ -193,6 +193,11 @@ dsmatchselect(const char *args, devstat_select_mode select_mode, int maxshowdevs int i; int retval = 0; + if (!args) { + warnx("dsmatchselect: no arguments"); + return(1); + } + /* * Break the (pipe delimited) input string out into separate * strings. @@ -251,6 +256,11 @@ dsselect(const char *args, devstat_select_mode select_mode, int maxshowdevs, int i; int retval = 0; + if (!args) { + warnx("dsselect: no argument"); + return(1); + } + /* * If we've gone through this code before, free previously * allocated resources. diff --git a/usr.bin/units/units.1 b/usr.bin/units/units.1 index 78f51654179c..6a9a801cc155 100644 --- a/usr.bin/units/units.1 +++ b/usr.bin/units/units.1 @@ -158,7 +158,7 @@ by careless unit definitions. Comments in the unit definition file begin with a '#' or '/' character at the beginning of a line. .Pp -Prefixes are defined in the same was as standard units, but with +Prefixes are defined in the same way as standard units, but with a trailing dash at the end of the prefix name. 
If a unit is not found even after removing trailing 's' or 'es', then it will be checked diff --git a/usr.sbin/freebsd-update/freebsd-update.sh b/usr.sbin/freebsd-update/freebsd-update.sh index fa5bdbda0016..68fdf2b774b8 100644 --- a/usr.sbin/freebsd-update/freebsd-update.sh +++ b/usr.sbin/freebsd-update/freebsd-update.sh @@ -310,6 +310,7 @@ config_SourceRelease () { if echo ${UNAME_r} | grep -qE '^[0-9.]+$'; then UNAME_r="${UNAME_r}-RELEASE" fi + export UNAME_r } # Define what happens to output of utilities @@ -667,17 +668,23 @@ fetchupgrade_check_params () { FETCHDIR=${RELNUM}/${ARCH} PATCHDIR=${RELNUM}/${ARCH}/bp - # Disallow upgrade from a version that is not `-RELEASE` - if ! echo "${RELNUM}" | grep -qE -- "-RELEASE$"; then - echo -n "`basename $0`: " - cat <<- EOF - Cannot upgrade from a version that is not a '-RELEASE' using `basename $0`. - Instead, FreeBSD can be directly upgraded by source or upgraded to a - RELEASE/RELENG version prior to running `basename $0`. - EOF - echo "System version: ${RELNUM}" - exit 1 - fi + # Disallow upgrade from a version that is not a release + case ${RELNUM} in + *-RELEASE | *-ALPHA* | *-BETA* | *-RC*) + ;; + *) + echo -n "`basename $0`: " + cat <<- EOF + Cannot upgrade from a version that is not a release + (including alpha, beta and release candidates) + using `basename $0`. Instead, FreeBSD can be directly + upgraded by source or upgraded to a RELEASE/RELENG version + prior to running `basename $0`. + Currently running: ${RELNUM} + EOF + exit 1 + ;; + esac # Figure out what directory contains the running kernel BOOTFILE=`sysctl -n kern.bootfile` @@ -2917,10 +2924,11 @@ Kernel updates have been installed. Please reboot and run install_from_index INDEX-NEW || return 1 install_delete INDEX-OLD INDEX-NEW || return 1 - # Rebuild /etc/spwd.db and /etc/pwd.db if necessary. + # Rebuild generated pwd files. if [ ${BASEDIR}/etc/master.passwd -nt ${BASEDIR}/etc/spwd.db ] || - [ ${BASEDIR}/etc/master.passwd -nt ${BASEDIR}/etc/pwd.db ]; then - pwd_mkdb -d ${BASEDIR}/etc ${BASEDIR}/etc/master.passwd + [ ${BASEDIR}/etc/master.passwd -nt ${BASEDIR}/etc/pwd.db ] || + [ ${BASEDIR}/etc/master.passwd -nt ${BASEDIR}/etc/passwd ]; then + pwd_mkdb -d ${BASEDIR}/etc -p ${BASEDIR}/etc/master.passwd fi # Rebuild /etc/login.conf.db if necessary. diff --git a/usr.sbin/kbdmap/kbdmap.c b/usr.sbin/kbdmap/kbdmap.c index e1dbdbe9b41d..555f4b3c3b81 100644 --- a/usr.sbin/kbdmap/kbdmap.c +++ b/usr.sbin/kbdmap/kbdmap.c @@ -241,8 +241,7 @@ get_font(void) if (strcmp(buf, "NO")) { if (fnt) free(fnt); - fnt = (char *) malloc(strlen(buf) + 1); - strcpy(fnt, buf); + fnt = strdup(buf); } } }
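The net80211 changes above serialize vap detach against in-flight ioctls with a single 32-bit word, iv_com_state, which packs a detached flag together with a reference count (the IEEE80211_COM_* constants). Below is a minimal, self-contained sketch of that pattern, assuming C11 atomics; com_state, com_incref, com_decref and com_detach are illustrative names, not part of the patch.

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

#define COM_DETACHED	0x00000001u	/* detach has started */
#define COM_REF_ADD	0x00000002u	/* one reference, stored in bits 31..1 */
#define COM_REF_M	0xfffffffeu	/* reference-count mask */
#define COM_REF_S	1		/* reference-count shift */
#define COM_REF_MAX	(COM_REF_M >> COM_REF_S)

static _Atomic uint32_t com_state;

/* Take a reference; fail if detach already started or the count is full. */
static int
com_incref(void)
{
	uint32_t ostate;

	ostate = atomic_fetch_add(&com_state, COM_REF_ADD);
	if (ostate & COM_DETACHED) {
		atomic_fetch_sub(&com_state, COM_REF_ADD);
		return (ENETDOWN);
	}
	if (((ostate & COM_REF_M) >> COM_REF_S) == COM_REF_MAX) {
		atomic_fetch_sub(&com_state, COM_REF_ADD);
		return (EOVERFLOW);
	}
	return (0);
}

/* Drop a reference previously taken by com_incref(). */
static void
com_decref(void)
{
	atomic_fetch_sub(&com_state, COM_REF_ADD);
}

/* Mark the object detached, then wait for outstanding references to drain. */
static void
com_detach(void)
{
	atomic_fetch_or(&com_state, COM_DETACHED);
	while ((atomic_load(&com_state) & COM_REF_M) != 0)
		;	/* a real implementation would sleep here */
}

Returning an error instead of blocking lets the caller fail fast with ENETDOWN once teardown has begun; the kernel code differs from this sketch mainly in that ieee80211_com_vdetach() drains references by sleeping with pause(9) rather than spinning.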