iwm - Reduce gratuitous differences with Linux iwlwifi in struct naming.

* Rename some structs and struct members for firmware handling.

Submitted by:	Augustin Cavalier <waddlesplash@gmail.com> (Haiku)
Obtained from:	DragonFlyBSD (4b1006a6e4d0f61d48c67b46e1f791e30837db67)

commit 41330addd7 (parent f25aeab82b)
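Summary of the renames (a reviewer's sketch drawn from the diff below, not part
of the original commit message; indentation is approximate):

	struct iwm_fw_sects              ->  struct iwm_fw_img
	fw_sect[IWM_UCODE_SECTION_MAX]   ->  sec[IWM_UCODE_SECTION_MAX]
	sc_fw.fw_sects[type]             ->  sc_fw.img[type]
	sc->ucode_capa                   ->  sc->sc_fw.ucode_capa
	                                     (ucode_capa moves from iwm_softc
	                                     into iwm_fw_info)

	/* resulting per-image container, as introduced in the header hunk below */
	struct iwm_fw_img {
		struct iwm_fw_desc sec[IWM_UCODE_SECTION_MAX];
		int fw_count;
		int is_dual_cpus;
		uint32_t paging_mem_size;
	};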
@@ -305,16 +305,16 @@ static int iwm_pcie_load_section(struct iwm_softc *, uint8_t,
 static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
 					     bus_addr_t, uint32_t);
 static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
-					const struct iwm_fw_sects *,
+					const struct iwm_fw_img *,
 					int, int *);
 static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
-					const struct iwm_fw_sects *,
+					const struct iwm_fw_img *,
 					int, int *);
 static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
-					const struct iwm_fw_sects *);
+					const struct iwm_fw_img *);
 static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
-					const struct iwm_fw_sects *);
-static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
+					const struct iwm_fw_img *);
+static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
@@ -426,7 +426,7 @@ static int
 iwm_firmware_store_section(struct iwm_softc *sc,
     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
 {
-	struct iwm_fw_sects *fws;
+	struct iwm_fw_img *fws;
 	struct iwm_fw_desc *fwone;
 
 	if (type >= IWM_UCODE_TYPE_MAX)
@@ -434,11 +434,11 @@ iwm_firmware_store_section(struct iwm_softc *sc,
 	if (dlen < sizeof(uint32_t))
 		return EINVAL;
 
-	fws = &sc->sc_fw.fw_sects[type];
+	fws = &sc->sc_fw.img[type];
 	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
 		return EINVAL;
 
-	fwone = &fws->fw_sect[fws->fw_count];
+	fwone = &fws->sec[fws->fw_count];
 
 	/* first 32bit are device load offset */
 	memcpy(&fwone->offset, data, sizeof(uint32_t));
@@ -536,7 +536,7 @@ iwm_fw_info_free(struct iwm_fw_info *fw)
 {
 	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
 	fw->fw_fp = NULL;
-	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
+	memset(fw->img, 0, sizeof(fw->img));
 }
 
 static int
@@ -545,7 +545,7 @@ iwm_read_firmware(struct iwm_softc *sc)
 	struct iwm_fw_info *fw = &sc->sc_fw;
 	const struct iwm_tlv_ucode_header *uhdr;
 	const struct iwm_ucode_tlv *tlv;
-	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
+	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
 	enum iwm_ucode_tlv_type tlv_type;
 	const struct firmware *fwp;
 	const uint8_t *data;
@@ -694,11 +694,11 @@ iwm_read_firmware(struct iwm_softc *sc)
 			}
 			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
 			if (num_of_cpus == 2) {
-				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
+				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
 					TRUE;
-				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
+				fw->img[IWM_UCODE_INIT].is_dual_cpus =
 					TRUE;
-				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
+				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
 					TRUE;
 			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
 				device_printf(sc->sc_dev,
@@ -831,10 +831,10 @@ iwm_read_firmware(struct iwm_softc *sc)
 				goto out;
 			}
 
-			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
+			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
 			    paging_mem_size;
 			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
-			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
+			sc->sc_fw.img[usniffer_img].paging_mem_size =
 			    paging_mem_size;
 			break;
 
@@ -2447,7 +2447,7 @@ iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
 
 static int
 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
-	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
+	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
 {
 	int shift_param;
 	int i, ret = 0, sec_num = 0x1;
@@ -2470,15 +2470,15 @@ iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
 		 * CPU2 non paged to CPU2 paging sec.
 		 */
-		if (!image->fw_sect[i].data ||
-		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
-		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
+		if (!image->sec[i].data ||
+		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
+		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
 			    "Break since Data not valid or Empty section, sec = %d\n",
 			    i);
 			break;
 		}
-		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
+		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
 		if (ret)
 			return ret;
 
@@ -2509,7 +2509,7 @@ iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
 
 static int
 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
-	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
+	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
 {
 	int shift_param;
 	int i, ret = 0;
@@ -2532,16 +2532,16 @@ iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
 		 * CPU2 non paged to CPU2 paging sec.
 		 */
-		if (!image->fw_sect[i].data ||
-		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
-		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
+		if (!image->sec[i].data ||
+		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
+		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
 			    "Break since Data not valid or Empty section, sec = %d\n",
 			    i);
 			break;
 		}
 
-		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
+		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
 		if (ret)
 			return ret;
 	}
@@ -2553,8 +2553,7 @@ iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
 }
 
 static int
-iwm_pcie_load_given_ucode(struct iwm_softc *sc,
-	const struct iwm_fw_sects *image)
+iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
 {
 	int ret = 0;
 	int first_ucode_section;
@@ -2593,7 +2592,7 @@ iwm_pcie_load_given_ucode(struct iwm_softc *sc,
 
 int
 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
-	const struct iwm_fw_sects *image)
+	const struct iwm_fw_img *image)
 {
 	int ret = 0;
 	int first_ucode_section;
@@ -2631,8 +2630,7 @@ iwm_enable_fw_load_int(struct iwm_softc *sc)
 
 /* XXX Add proper rfkill support code */
 static int
-iwm_start_fw(struct iwm_softc *sc,
-	const struct iwm_fw_sects *fw)
+iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
 {
 	int ret;
 
@@ -2824,12 +2822,12 @@ iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
 {
 	struct iwm_notification_wait alive_wait;
 	struct iwm_mvm_alive_data alive_data;
-	const struct iwm_fw_sects *fw;
+	const struct iwm_fw_img *fw;
 	enum iwm_ucode_type old_type = sc->cur_ucode;
 	int error;
 	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
 
-	fw = &sc->sc_fw.fw_sects[ucode_type];
+	fw = &sc->sc_fw.img[ucode_type];
 	sc->cur_ucode = ucode_type;
 	sc->ucode_loaded = FALSE;
 
@@ -4473,7 +4471,7 @@ static boolean_t
 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
 {
 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
-	boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
+	boolean_t tlv_lar = fw_has_capa(&sc->sc_fw.ucode_capa,
 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
 
 	if (iwm_lar_disable)
@@ -4492,9 +4490,9 @@ iwm_mvm_is_lar_supported(struct iwm_softc *sc)
 static boolean_t
 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
 {
-	return fw_has_api(&sc->ucode_capa,
+	return fw_has_api(&sc->sc_fw.ucode_capa,
 	    IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
-	    fw_has_capa(&sc->ucode_capa,
+	    fw_has_capa(&sc->sc_fw.ucode_capa,
 	    IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
 }
 
@@ -4515,7 +4513,7 @@ iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
 	int n_channels;
 	uint16_t mcc;
 #endif
-	int resp_v2 = fw_has_capa(&sc->ucode_capa,
+	int resp_v2 = fw_has_capa(&sc->sc_fw.ucode_capa,
 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
 
 	if (!iwm_mvm_is_lar_supported(sc)) {
@@ -4674,7 +4672,7 @@ iwm_init_hw(struct iwm_softc *sc)
 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
 		goto error;
 
-	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
+	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
 			goto error;
 	}
@@ -6208,7 +6206,7 @@ iwm_scan_start(struct ieee80211com *ic)
 		device_printf(sc->sc_dev,
 		    "%s: Previous scan not completed yet\n", __func__);
 	}
-	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
+	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
 		error = iwm_mvm_umac_scan(sc);
 	else
 		error = iwm_mvm_lmac_scan(sc);
@@ -141,7 +141,7 @@ iwm_free_fw_paging(struct iwm_softc *sc)
 }
 
 static int
-iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
+iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_img *image)
 {
 	int sec_idx, idx;
 	uint32_t offset = 0;
@@ -158,7 +158,7 @@ iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
 	 * CPU2 paging image (including instruction and data)
 	 */
 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECTION_MAX; sec_idx++) {
-		if (image->fw_sect[sec_idx].offset == IWM_PAGING_SEPARATOR_SECTION) {
+		if (image->sec[sec_idx].offset == IWM_PAGING_SEPARATOR_SECTION) {
 			sec_idx++;
 			break;
 		}
@@ -168,7 +168,7 @@ iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
 	 * If paging is enabled there should be at least 2 more sections left
 	 * (one for CSS and one for Paging data)
 	 */
-	if (sec_idx >= nitems(image->fw_sect) - 1) {
+	if (sec_idx >= nitems(image->sec) - 1) {
 		device_printf(sc->sc_dev,
 		    "Paging: Missing CSS and/or paging sections\n");
 		iwm_free_fw_paging(sc);
@@ -181,7 +181,7 @@ iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
 	    sec_idx);
 
 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
-	    image->fw_sect[sec_idx].data,
+	    image->sec[sec_idx].data,
 	    sc->fw_paging_db[0].fw_paging_size);
 
 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
@@ -198,7 +198,7 @@ iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
 	 */
 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
-		    (const char *)image->fw_sect[sec_idx].data + offset,
+		    (const char *)image->sec[sec_idx].data + offset,
 		    sc->fw_paging_db[idx].fw_paging_size);
 
 		IWM_DPRINTF(sc, IWM_DEBUG_FW,
@@ -212,7 +212,7 @@ iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
 	/* copy the last paging block */
 	if (sc->num_of_pages_in_last_blk > 0) {
 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
-		    (const char *)image->fw_sect[sec_idx].data + offset,
+		    (const char *)image->sec[sec_idx].data + offset,
 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
 
 		IWM_DPRINTF(sc, IWM_DEBUG_FW,
@@ -224,7 +224,7 @@ iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
 }
 
 static int
-iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
+iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_img *image)
 {
 	int blk_idx = 0;
 	int error, num_of_pages;
@@ -298,7 +298,7 @@ iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
 }
 
 int
-iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
+iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_img *fw)
 {
 	int ret;
 
@@ -311,7 +311,7 @@ iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
 
 /* send paging cmd to FW in case CPU2 has paging image */
 int
-iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
+iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_img *fw)
 {
 	int blk_idx;
 	uint32_t dev_phy_addr;
@@ -107,7 +107,7 @@
 #define IWM_PAGING_TLV_SECURE_MASK 1
 
 extern void iwm_free_fw_paging(struct iwm_softc *);
-extern int iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
-extern int iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
+extern int iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_img *);
+extern int iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_img *);
 
 #endif /* __IF_IWM_FW_H__ */
@@ -390,7 +390,7 @@ static uint8_t
 ch_id_to_ch_index(uint16_t ch_id)
 {
 	if (!is_valid_channel(ch_id))
-		return 0xff;
+		return 0xff;
 
 	if (ch_id <= 14)
 		return ch_id - 1;
@@ -509,7 +509,7 @@ iwm_phy_db_send_all_channel_groups(struct iwm_phy_db *phy_db,
 	int err;
 	struct iwm_phy_db_entry *entry;
 
-	/* Send all the channel specific groups to operational fw */
+	/* Send all the channel specific groups to operational fw */
 	for (i = 0; i < max_ch_groups; i++) {
 		entry = iwm_phy_db_get_section(phy_db,
 		    type,
@@ -215,7 +215,7 @@ static inline boolean_t
 iwm_mvm_rrm_scan_needed(struct iwm_softc *sc)
 {
 	/* require rrm scan whenever the fw supports it */
-	return fw_has_capa(&sc->ucode_capa,
+	return fw_has_capa(&sc->sc_fw.ucode_capa,
 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
 }
 
@@ -251,7 +251,7 @@ iwm_mvm_rx_lmac_scan_complete_notif(struct iwm_softc *sc,
 	/* If this happens, the firmware has mistakenly sent an LMAC
 	 * notification during UMAC scans -- warn and ignore it.
 	 */
-	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
+	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
 		device_printf(sc->sc_dev,
 		    "%s: Mistakenly got LMAC notification during UMAC scan\n",
 		    __func__);
@@ -307,7 +307,8 @@ iwm_mvm_lmac_scan_fill_channels(struct iwm_softc *sc,
 	int j;
 
 	for (nchan = j = 0;
-	    j < ss->ss_last && nchan < sc->ucode_capa.n_scan_channels; j++) {
+	    j < ss->ss_last && nchan < sc->sc_fw.ucode_capa.n_scan_channels;
+	    j++) {
 		c = ss->ss_chans[j];
 		/*
 		 * Catch other channels, in case we have 900MHz channels or
@@ -350,7 +351,8 @@ iwm_mvm_umac_scan_fill_channels(struct iwm_softc *sc,
 	int j;
 
 	for (nchan = j = 0;
-	    j < ss->ss_last && nchan < sc->ucode_capa.n_scan_channels; j++) {
+	    j < ss->ss_last && nchan < sc->sc_fw.ucode_capa.n_scan_channels;
+	    j++) {
 		c = ss->ss_chans[j];
 		/*
 		 * Catch other channels, in case we have 900MHz channels or
@@ -495,7 +497,7 @@ iwm_mvm_config_umac_scan(struct iwm_softc *sc)
 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
 	    IWM_SCAN_CONFIG_RATE_54M);
 
-	cmd_size = sizeof(*scan_config) + sc->ucode_capa.n_scan_channels;
+	cmd_size = sizeof(*scan_config) + sc->sc_fw.ucode_capa.n_scan_channels;
 
 	scan_config = malloc(cmd_size, M_DEVBUF, M_NOWAIT | M_ZERO);
 	if (scan_config == NULL)
@@ -523,7 +525,8 @@ iwm_mvm_config_umac_scan(struct iwm_softc *sc)
 	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
 
 	for (nchan = j = 0;
-	    j < ic->ic_nchans && nchan < sc->ucode_capa.n_scan_channels; j++) {
+	    j < ic->ic_nchans && nchan < sc->sc_fw.ucode_capa.n_scan_channels;
+	    j++) {
 		c = &ic->ic_channels[j];
 		/* For 2GHz, only populate 11b channels */
 		/* For 5GHz, only populate 11a channels */
@@ -566,7 +569,7 @@ iwm_mvm_config_umac_scan(struct iwm_softc *sc)
 static boolean_t
 iwm_mvm_scan_use_ebs(struct iwm_softc *sc)
 {
-	const struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
+	const struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
 
 	/* We can only use EBS if:
 	 *	1. the feature is supported;
@@ -596,7 +599,7 @@ iwm_mvm_umac_scan(struct iwm_softc *sc)
 
 	req_len = sizeof(struct iwm_scan_req_umac) +
 	    (sizeof(struct iwm_scan_channel_cfg_umac) *
-	    sc->ucode_capa.n_scan_channels) +
+	    sc->sc_fw.ucode_capa.n_scan_channels) +
 	    sizeof(struct iwm_scan_req_umac_tail);
 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
 		return ENOMEM;
@@ -630,7 +633,7 @@ iwm_mvm_umac_scan(struct iwm_softc *sc)
 
 	tail = (void *)((char *)&req->data +
 	    sizeof(struct iwm_scan_channel_cfg_umac) *
-	    sc->ucode_capa.n_scan_channels);
+	    sc->sc_fw.ucode_capa.n_scan_channels);
 
 	/* Check if we're doing an active directed scan. */
 	for (i = 0; i < nssid; i++) {
@@ -694,7 +697,7 @@ iwm_mvm_lmac_scan(struct iwm_softc *sc)
 
 	req_len = sizeof(struct iwm_scan_req_lmac) +
 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
-	    sc->ucode_capa.n_scan_channels) + sizeof(struct iwm_scan_probe_req);
+	    sc->sc_fw.ucode_capa.n_scan_channels) + sizeof(struct iwm_scan_probe_req);
 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
 		return ENOMEM;
 	req = malloc(req_len, M_DEVBUF, M_NOWAIT | M_ZERO);
@@ -764,7 +767,7 @@ iwm_mvm_lmac_scan(struct iwm_softc *sc)
 	ret = iwm_mvm_fill_probe_req(sc,
 	    (struct iwm_scan_probe_req *)(req->data +
 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
-	    sc->ucode_capa.n_scan_channels)));
+	    sc->sc_fw.ucode_capa.n_scan_channels)));
 	if (ret) {
 		free(req, M_DEVBUF);
 		return ret;
@@ -863,7 +866,7 @@ iwm_mvm_scan_stop_wait(struct iwm_softc *sc)
 
 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "Preparing to stop scan\n");
 
-	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
+	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
 		ret = iwm_mvm_umac_scan_abort(sc);
 	else
 		ret = iwm_mvm_lmac_scan_abort(sc);
@@ -186,15 +186,20 @@ struct iwm_fw_desc {
 	uint32_t offset;	/* offset in the device */
 };
 
+struct iwm_fw_img {
+	struct iwm_fw_desc sec[IWM_UCODE_SECTION_MAX];
+	int fw_count;
+	int is_dual_cpus;
+	uint32_t paging_mem_size;
+};
+
 struct iwm_fw_info {
 	const struct firmware *fw_fp;
 
-	struct iwm_fw_sects {
-		struct iwm_fw_desc fw_sect[IWM_UCODE_SECTION_MAX];
-		int fw_count;
-		int is_dual_cpus;
-		uint32_t paging_mem_size;
-	} fw_sects[IWM_UCODE_TYPE_MAX];
+	/* ucode images */
+	struct iwm_fw_img img[IWM_UCODE_TYPE_MAX];
+
+	struct iwm_ucode_capabilities ucode_capa;
 
 	uint32_t phy_config;
 	uint8_t valid_tx_ant;
@@ -470,7 +475,6 @@ struct iwm_softc {
 	int ucode_loaded;
 	char sc_fwver[32];
 
-	struct iwm_ucode_capabilities ucode_capa;
 	char sc_fw_mcc[3];
 
 	int sc_intmask;