iwlwifi: update from iwlwifi-next

Import new code from iwlwifi-next at cb0a1fb7fd86b0062692b5056ca8552906509512
(matching tag: iwlwifi-next-for-kalle-2022-02-18).

Also add files that were not previously imported because we are not yet
compiling them; having them in the tree eases future updates and makes
them available when needed.

Upstream now includes MEI (Management Engine Interface) support, which we
cannot import (it is currently GPL-only), so we provide stub functions for
the missing bits.
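
As an illustration of the stub approach, a minimal sketch in the style of
upstream's iwl-mei.h inline stubs; the exact set of functions stubbed in
this tree is an assumption, not a list taken from this commit:

/*
 * Sketch only: no-op stand-ins for GPL-only MEI entry points.  Upstream
 * provides similar static inline stubs when CONFIG_IWLWIFI_MEI is not
 * set; the names below follow that pattern.
 */
static inline bool iwl_mei_is_connected(void)
{
	return false;	/* without MEI support we are never connected */
}

static inline void iwl_mei_device_down(void)
{
	/* nothing to notify without MEI support */
}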

This also reduces the diff to upstream: changes submitted to avoid
problems with const and with void * arithmetic were merged.

In the module build Makefile, disable CONFIG_IWLWIFI_OPMODE_MODULAR,
as we build iwlwifi as a single module.

Sponsored by:   The FreeBSD Foundation
MFC after:      3 days
Author:         Bjoern A. Zeeb
Date:           2022-02-24 21:34:13 +00:00
Parent:         530d274c15
Commit:         d9836fb4b9

91 changed files with 11379 additions and 1554 deletions

View File

@@ -7,9 +7,10 @@
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-prph.h"
#include "fw/api/txq.h"
/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 67
#define IWL_22000_UCODE_API_MAX 70
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -39,6 +40,7 @@
#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0-"
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-"
@@ -54,7 +56,13 @@
#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-"
#define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-"
#define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-"
#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm7-a0-"
#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm-a0-"
#define IWL_BZ_Z_GF_A_FW_PRE "iwlwifi-bz-z0-gf-a0-"
#define IWL_BNJ_A_FM_A_FW_PRE "iwlwifi-BzBnj-a0-fm-a0-"
#define IWL_BNJ_A_FM4_A_FW_PRE "iwlwifi-BzBnj-a0-fm4-a0-"
#define IWL_BNJ_A_GF_A_FW_PRE "iwlwifi-BzBnj-a0-gf-a0-"
#define IWL_BNJ_A_GF4_A_FW_PRE "iwlwifi-BzBnj-a0-gf4-a0-"
#define IWL_BNJ_A_HR_B_FW_PRE "iwlwifi-BzBnj-a0-hr-b0-"
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
@@ -113,6 +121,16 @@
IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \
IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_FM4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_GF_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BNJ_A_HR_B_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -206,7 +224,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
.min_256_ba_txq_size = 1024, \
.min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -234,7 +252,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.dccm2_len = IWL_22000_DCCM2_LEN, \
.smem_offset = IWL_22000_SMEM_OFFSET, \
.smem_len = IWL_22000_SMEM_LEN, \
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, \
.apmg_not_supported = true, \
.trans.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
@@ -267,7 +285,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
.min_256_ba_txq_size = 1024, \
.min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -281,6 +299,12 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.addr = DBGC_CUR_DBGBUF_STATUS, \
.mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
}, \
}, \
.mon_dbgi_regs = { \
.write_ptr = { \
.addr = DBGI_SRAM_FIFO_POINTERS, \
.mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
}, \
}
const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
@@ -458,6 +482,7 @@ const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
@@ -626,7 +651,7 @@ const struct iwl_cfg iwl_ax200_cfg_cc = {
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
.name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
.name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)",
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -639,7 +664,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
.name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
.name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)",
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -652,7 +677,7 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
.name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
.name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)",
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -665,7 +690,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
.name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
.name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)",
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -696,13 +721,6 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = {
.num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
.name = "Intel(R) Wi-Fi 6 AX210 160MHz",
.fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
.name = iwl_ax211_name,
.fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
@@ -805,6 +823,20 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_ma_a0_ms_a0 = {
.fw_name_pre = IWL_MA_A_MR_A_FW_PRE,
.uhb_supported = false,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = {
.fw_name_pre = IWL_SO_A_MR_A_FW_PRE,
.uhb_supported = false,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
.fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
.uhb_supported = true,
@@ -819,6 +851,13 @@ const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_snj_a0_ms_a0 = {
.fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
.uhb_supported = false,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
.fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
@@ -879,6 +918,47 @@ const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_bz_z0_gf_a0 = {
.fw_name_pre = IWL_BZ_Z_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0 = {
.fw_name_pre = IWL_BNJ_A_FM_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0 = {
.fw_name_pre = IWL_BNJ_A_FM4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0 = {
.fw_name_pre = IWL_BNJ_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0 = {
.fw_name_pre = IWL_BNJ_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0 = {
.fw_name_pre = IWL_BNJ_A_HR_B_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -907,3 +987,8 @@ MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));

View File

@@ -0,0 +1,971 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
* Copyright (C) 2019-2021 Intel Corporation
*/
#include <linux/uuid.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
#include "fw/runtime.h"
const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
0xA5, 0xB3, 0x1F, 0x73,
0x8E, 0x28, 0x5A, 0xDE);
IWL_EXPORT_SYMBOL(iwl_guid);
const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
0x81, 0x4F, 0x75, 0xE4,
0xDD, 0x26, 0xB5, 0xFD);
IWL_EXPORT_SYMBOL(iwl_rfi_guid);
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
acpi_handle *ret_handle)
{
acpi_handle root_handle;
acpi_status status;
root_handle = ACPI_HANDLE(dev);
if (!root_handle) {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: Could not retrieve root port handle\n");
return -ENOENT;
}
status = acpi_get_handle(root_handle, method, ret_handle);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: %s method not found\n", method);
return -ENOENT;
}
return 0;
}
void *iwl_acpi_get_object(struct device *dev, acpi_string method)
{
struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_handle handle;
acpi_status status;
int ret;
ret = iwl_acpi_get_handle(dev, method, &handle);
if (ret)
return ERR_PTR(-ENOENT);
/* Call the method with no arguments */
status = acpi_evaluate_object(handle, NULL, NULL, &buf);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: %s method invocation failed (status: 0x%x)\n",
method, status);
return ERR_PTR(-ENOENT);
}
return buf.pointer;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
/*
* Generic function for evaluating a method defined in the device specific
* method (DSM) interface. The returned acpi object must be freed by calling
* function.
*/
static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
union acpi_object *args,
const guid_t *guid)
{
union acpi_object *obj;
obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), guid, rev, func,
args);
if (!obj) {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method invocation failed (rev: %d, func:%d)\n",
rev, func);
return ERR_PTR(-ENOENT);
}
return obj;
}
/*
* Generic function to evaluate a DSM with no arguments
* and an integer return value,
* (as an integer object or inside a buffer object),
* verify and assign the value in the "value" parameter.
* return 0 in success and the appropriate errno otherwise.
*/
static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
const guid_t *guid, u64 *value,
size_t expected_size)
{
union acpi_object *obj;
int ret = 0;
obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid);
if (IS_ERR(obj)) {
IWL_DEBUG_DEV_RADIO(dev,
"Failed to get DSM object. func= %d\n",
func);
return -ENOENT;
}
if (obj->type == ACPI_TYPE_INTEGER) {
*value = obj->integer.value;
} else if (obj->type == ACPI_TYPE_BUFFER) {
__le64 le_value = 0;
if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
return -EINVAL;
/* if the buffer size doesn't match the expected size */
if (obj->buffer.length != expected_size)
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM invalid buffer size, padding or truncating (%d)\n",
obj->buffer.length);
/* assuming LE from Intel BIOS spec */
memcpy(&le_value, obj->buffer.pointer,
min_t(size_t, expected_size, (size_t)obj->buffer.length));
*value = le64_to_cpu(le_value);
} else {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method did not return a valid object, type=%d\n",
obj->type);
ret = -EINVAL;
goto out;
}
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method evaluated: func=%d, ret=%d\n",
func, ret);
out:
ACPI_FREE(obj);
return ret;
}
/*
* Evaluate a DSM with no arguments and a u8 return value,
*/
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
const guid_t *guid, u8 *value)
{
int ret;
u64 val;
ret = iwl_acpi_get_dsm_integer(dev, rev, func,
guid, &val, sizeof(u8));
if (ret < 0)
return ret;
/* cast val (u64) to be u8 */
*value = (u8)val;
return 0;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
/*
* Evaluate a DSM with no arguments and a u32 return value,
*/
int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
const guid_t *guid, u32 *value)
{
int ret;
u64 val;
ret = iwl_acpi_get_dsm_integer(dev, rev, func,
guid, &val, sizeof(u32));
if (ret < 0)
return ret;
/* cast val (u64) to be u32 */
*value = (u32)val;
return 0;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev,
union acpi_object *data,
int min_data_size,
int max_data_size,
int *tbl_rev)
{
int i;
union acpi_object *wifi_pkg;
/*
* We need at least one entry in the wifi package that
* describes the domain, and one more entry, otherwise there's
* no point in reading it.
*/
if (WARN_ON_ONCE(min_data_size < 2 || min_data_size > max_data_size))
return ERR_PTR(-EINVAL);
/*
* We need at least two packages, one for the revision and one
* for the data itself. Also check that the revision is valid
* (i.e. it is an integer (each caller has to check by itself
* if the returned revision is supported)).
*/
if (data->type != ACPI_TYPE_PACKAGE ||
data->package.count < 2 ||
data->package.elements[0].type != ACPI_TYPE_INTEGER) {
IWL_DEBUG_DEV_RADIO(dev, "Invalid packages structure\n");
return ERR_PTR(-EINVAL);
}
*tbl_rev = data->package.elements[0].integer.value;
/* loop through all the packages to find the one for WiFi */
for (i = 1; i < data->package.count; i++) {
union acpi_object *domain;
wifi_pkg = &data->package.elements[i];
/* skip entries that are not a package with the right size */
if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
wifi_pkg->package.count < min_data_size ||
wifi_pkg->package.count > max_data_size)
continue;
domain = &wifi_pkg->package.elements[0];
if (domain->type == ACPI_TYPE_INTEGER &&
domain->integer.value == ACPI_WIFI_DOMAIN)
goto found;
}
return ERR_PTR(-ENOENT);
found:
return wifi_pkg;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
union iwl_tas_config_cmd *cmd, int fw_ver)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i, block_list_size, enabled;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
/* try to read WTAS table revision 1 or revision 0 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WTAS_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if (tbl_rev == 1 && wifi_pkg->package.elements[1].type ==
ACPI_TYPE_INTEGER) {
u32 tas_selection =
(u32)wifi_pkg->package.elements[1].integer.value;
u16 override_iec =
(tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
ACPI_WTAS_ENABLE_IEC_POS;
u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
if (fw_ver <= 3) {
cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
} else {
cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
cmd->v4.override_tas_iec = (u8)override_iec;
cmd->v4.enable_tas_iec = (u8)enabled_iec;
}
} else if (tbl_rev == 0 &&
wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
enabled = !!wifi_pkg->package.elements[1].integer.value;
} else {
ret = -EINVAL;
goto out_free;
}
if (!enabled) {
IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
ret = 0;
goto out_free;
}
IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev);
if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[2].integer.value >
APCI_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
wifi_pkg->package.elements[2].integer.value);
ret = -EINVAL;
goto out_free;
}
block_list_size = wifi_pkg->package.elements[2].integer.value;
cmd->v4.block_list_size = cpu_to_le32(block_list_size);
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
block_list_size);
ret = -EINVAL;
goto out_free;
}
for (i = 0; i < block_list_size; i++) {
u32 country;
if (wifi_pkg->package.elements[3 + i].type !=
ACPI_TYPE_INTEGER) {
IWL_DEBUG_RADIO(fwrt,
"TAS invalid array elem %d\n", 3 + i);
ret = -EINVAL;
goto out_free;
}
country = wifi_pkg->package.elements[3 + i].integer.value;
cmd->v4.block_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
ret = 1;
out_free:
kfree(data);
return ret;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
int iwl_acpi_get_mcc(struct device *dev, char *mcc)
{
union acpi_object *wifi_pkg, *data;
u32 mcc_val;
int ret, tbl_rev;
data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
mcc_val = wifi_pkg->package.elements[1].integer.value;
mcc[0] = (mcc_val >> 8) & 0xff;
mcc[1] = mcc_val & 0xff;
mcc[2] = '\0';
ret = 0;
out_free:
kfree(data);
return ret;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc);
u64 iwl_acpi_get_pwr_limit(struct device *dev)
{
union acpi_object *data, *wifi_pkg;
u64 dflt_pwr_limit;
int tbl_rev;
data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
if (IS_ERR(data)) {
dflt_pwr_limit = 0;
goto out;
}
wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev);
if (IS_ERR(wifi_pkg) || tbl_rev != 0 ||
wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
dflt_pwr_limit = 0;
goto out_free;
}
dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
out_free:
kfree(data);
out:
return dflt_pwr_limit;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev;
data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
*extl_clk = wifi_pkg->package.elements[1].integer.value;
ret = 0;
out_free:
kfree(data);
return ret;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
static int iwl_sar_set_profile(union acpi_object *table,
struct iwl_sar_profile *profile,
bool enabled, u8 num_chains, u8 num_sub_bands)
{
int i, j, idx = 0;
/*
* The table from ACPI is flat, but we store it in a
* structured array.
*/
for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) {
for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) {
/* if we don't have the values, use the default */
if (i >= num_chains || j >= num_sub_bands) {
profile->chains[i].subbands[j] = 0;
} else {
if (table[idx].type != ACPI_TYPE_INTEGER ||
table[idx].integer.value > U8_MAX)
return -EINVAL;
profile->chains[i].subbands[j] =
table[idx].integer.value;
idx++;
}
}
}
/* Only if all values were valid can the profile be enabled */
profile->enabled = enabled;
return 0;
}
static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
__le16 *per_chain, u32 n_subbands,
int prof_a, int prof_b)
{
int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b };
int i, j;
for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) {
struct iwl_sar_profile *prof;
/* don't allow SAR to be disabled (profile 0 means disable) */
if (profs[i] == 0)
return -EPERM;
/* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
if (profs[i] > ACPI_SAR_PROFILE_NUM)
return -EINVAL;
/* profiles go from 1 to 4, so decrement to access the array */
prof = &fwrt->sar_profiles[profs[i] - 1];
/* if the profile is disabled, do nothing */
if (!prof->enabled) {
IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
profs[i]);
/*
* if one of the profiles is disabled, we
* ignore all of them and return 1 to
* differentiate disabled from other failures.
*/
return 1;
}
IWL_DEBUG_INFO(fwrt,
"SAR EWRD: chain %d profile index %d\n",
i, profs[i]);
IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
for (j = 0; j < n_subbands; j++) {
per_chain[i * n_subbands + j] =
cpu_to_le16(prof->chains[i].subbands[j]);
IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
j, prof->chains[i].subbands[j]);
}
}
return 0;
}
int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
__le16 *per_chain, u32 n_tables, u32 n_subbands,
int prof_a, int prof_b)
{
int i, ret = 0;
for (i = 0; i < n_tables; i++) {
ret = iwl_sar_fill_table(fwrt,
&per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0],
n_subbands, prof_a, prof_b);
if (ret)
break;
}
return ret;
}
IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *table, *data;
bool enabled;
int ret, tbl_rev;
u8 num_chains, num_sub_bands;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
/* start by trying to read revision 2 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WRDS_WIFI_DATA_SIZE_REV2,
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 2) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
num_chains = ACPI_SAR_NUM_CHAINS_REV2;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2;
goto read_table;
}
/* then try revision 1 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WRDS_WIFI_DATA_SIZE_REV1,
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 1) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
num_chains = ACPI_SAR_NUM_CHAINS_REV1;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1;
goto read_table;
}
/* then finally revision 0 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WRDS_WIFI_DATA_SIZE_REV0,
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 0) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
num_chains = ACPI_SAR_NUM_CHAINS_REV0;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0;
goto read_table;
}
ret = PTR_ERR(wifi_pkg);
goto out_free;
read_table:
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
}
IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev);
enabled = !!(wifi_pkg->package.elements[1].integer.value);
/* position of the actual table */
table = &wifi_pkg->package.elements[2];
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled,
num_chains, num_sub_bands);
out_free:
kfree(data);
return ret;
}
IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table);
int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
bool enabled;
int i, n_profiles, tbl_rev, pos;
int ret = 0;
u8 num_chains, num_sub_bands;
data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
/* start by trying to read revision 2 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_EWRD_WIFI_DATA_SIZE_REV2,
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 2) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
num_chains = ACPI_SAR_NUM_CHAINS_REV2;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2;
goto read_table;
}
/* then try revision 1 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_EWRD_WIFI_DATA_SIZE_REV1,
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 1) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
num_chains = ACPI_SAR_NUM_CHAINS_REV1;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1;
goto read_table;
}
/* then finally revision 0 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_EWRD_WIFI_DATA_SIZE_REV0,
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 0) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
num_chains = ACPI_SAR_NUM_CHAINS_REV0;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0;
goto read_table;
}
ret = PTR_ERR(wifi_pkg);
goto out_free;
read_table:
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
}
enabled = !!(wifi_pkg->package.elements[1].integer.value);
n_profiles = wifi_pkg->package.elements[2].integer.value;
/*
* Check the validity of n_profiles. The EWRD profiles start
* from index 1, so the maximum value allowed here is
* ACPI_SAR_PROFILES_NUM - 1.
*/
if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
ret = -EINVAL;
goto out_free;
}
/* the tables start at element 3 */
pos = 3;
for (i = 0; i < n_profiles; i++) {
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
*/
ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
&fwrt->sar_profiles[i + 1], enabled,
num_chains, num_sub_bands);
if (ret < 0)
break;
/* go to the next table */
pos += num_chains * num_sub_bands;
}
out_free:
kfree(data);
return ret;
}
IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table);
int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
int i, j, k, ret, tbl_rev;
u8 num_bands, num_profiles;
static const struct {
u8 revisions;
u8 bands;
u8 profiles;
u8 min_profiles;
} rev_data[] = {
{
.revisions = BIT(3),
.bands = ACPI_GEO_NUM_BANDS_REV2,
.profiles = ACPI_NUM_GEO_PROFILES_REV3,
.min_profiles = 3,
},
{
.revisions = BIT(2),
.bands = ACPI_GEO_NUM_BANDS_REV2,
.profiles = ACPI_NUM_GEO_PROFILES,
},
{
.revisions = BIT(0) | BIT(1),
.bands = ACPI_GEO_NUM_BANDS_REV0,
.profiles = ACPI_NUM_GEO_PROFILES,
},
};
int idx;
/* start from one to skip the domain */
int entry_idx = 1;
BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES_REV3 != IWL_NUM_GEO_PROFILES_V3);
BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES != IWL_NUM_GEO_PROFILES);
data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
/* read the highest revision we understand first */
for (idx = 0; idx < ARRAY_SIZE(rev_data); idx++) {
/* min_profiles != 0 requires num_profiles header */
u32 hdr_size = 1 + !!rev_data[idx].min_profiles;
u32 profile_size = ACPI_GEO_PER_CHAIN_SIZE *
rev_data[idx].bands;
u32 max_size = hdr_size + profile_size * rev_data[idx].profiles;
u32 min_size;
if (!rev_data[idx].min_profiles)
min_size = max_size;
else
min_size = hdr_size +
profile_size * rev_data[idx].min_profiles;
wifi_pkg = iwl_acpi_get_wifi_pkg_range(fwrt->dev, data,
min_size, max_size,
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (!(BIT(tbl_rev) & rev_data[idx].revisions))
continue;
num_bands = rev_data[idx].bands;
num_profiles = rev_data[idx].profiles;
if (rev_data[idx].min_profiles) {
/* read header that says # of profiles */
union acpi_object *entry;
entry = &wifi_pkg->package.elements[entry_idx];
entry_idx++;
if (entry->type != ACPI_TYPE_INTEGER ||
entry->integer.value > num_profiles) {
ret = -EINVAL;
goto out_free;
}
num_profiles = entry->integer.value;
/*
* this also validates >= min_profiles since we
* otherwise wouldn't have gotten the data when
* looking up in ACPI
*/
if (wifi_pkg->package.count !=
hdr_size + profile_size * num_profiles) {
ret = -EINVAL;
goto out_free;
}
}
goto read_table;
}
}
if (idx < ARRAY_SIZE(rev_data))
ret = PTR_ERR(wifi_pkg);
else
ret = -ENOENT;
goto out_free;
read_table:
fwrt->geo_rev = tbl_rev;
for (i = 0; i < num_profiles; i++) {
for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) {
union acpi_object *entry;
/*
* num_bands is either 2 or 3, if it's only 2 then
* fill the third band (6 GHz) with the values from
* 5 GHz (second band)
*/
if (j >= num_bands) {
fwrt->geo_profiles[i].bands[j].max =
fwrt->geo_profiles[i].bands[1].max;
} else {
entry = &wifi_pkg->package.elements[entry_idx];
entry_idx++;
if (entry->type != ACPI_TYPE_INTEGER ||
entry->integer.value > U8_MAX) {
ret = -EINVAL;
goto out_free;
}
fwrt->geo_profiles[i].bands[j].max =
entry->integer.value;
}
for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) {
/* same here as above */
if (j >= num_bands) {
fwrt->geo_profiles[i].bands[j].chains[k] =
fwrt->geo_profiles[i].bands[1].chains[k];
} else {
entry = &wifi_pkg->package.elements[entry_idx];
entry_idx++;
if (entry->type != ACPI_TYPE_INTEGER ||
entry->integer.value > U8_MAX) {
ret = -EINVAL;
goto out_free;
}
fwrt->geo_profiles[i].bands[j].chains[k] =
entry->integer.value;
}
}
}
}
fwrt->geo_num_profiles = num_profiles;
fwrt->geo_enabled = true;
ret = 0;
out_free:
kfree(data);
return ret;
}
IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table);
bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
{
/*
* The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
* earlier firmware versions. Unfortunately, we don't have a
* TLV API flag to rely on, so rely on the major version which
* is in the first byte of ucode_ver. This was implemented
* initially on version 38 and then backported to 17. It was
* also backported to 29, but only for 7265D devices. The
* intention was to have it in 36 as well, but not all 8000
* family got this feature enabled. The 8000 family is the
* only one using version 36, so skip this version entirely.
*/
return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
(IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
CSR_HW_REV_TYPE_7265D));
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset *table,
u32 n_bands, u32 n_profiles)
{
int i, j;
if (!iwl_sar_geo_support(fwrt))
return -EOPNOTSUPP;
for (i = 0; i < n_profiles; i++) {
for (j = 0; j < n_bands; j++) {
struct iwl_per_chain_offset *chain =
&table[i * n_bands + j];
chain->max_tx_power =
cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0];
chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1];
IWL_DEBUG_RADIO(fwrt,
"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
i, j,
fwrt->geo_profiles[i].bands[j].chains[0],
fwrt->geo_profiles[i].bands[j].chains[1],
fwrt->geo_profiles[i].bands[j].max);
}
}
return 0;
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
int ret;
u8 value;
__le32 config_bitmap = 0;
/*
** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'
*/
ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
DSM_FUNC_ENABLE_INDONESIA_5G2,
&iwl_guid, &value);
if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
/*
** Evaluate func 'DSM_FUNC_DISABLE_SRD'
*/
ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
DSM_FUNC_DISABLE_SRD,
&iwl_guid, &value);
if (!ret) {
if (value == DSM_VALUE_SRD_PASSIVE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
else if (value == DSM_VALUE_SRD_DISABLE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
}
return config_bitmap;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);

View File

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2021 Intel Corporation
* Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
@@ -65,10 +65,21 @@
#define ACPI_ECKV_WIFI_DATA_SIZE 2
/*
* 1 type, 1 enabled, 1 block list size, 16 block list array
* TAS size: 1 element for type,
* 1 element for enabled field,
* 1 element for block list size,
* 16 elements for block list array
*/
#define APCI_WTAS_BLACK_LIST_MAX 16
#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
#define ACPI_WTAS_ENABLED_MSK 0x1
#define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2
#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
#define ACPI_WTAS_ENABLE_IEC_POS 0x2
#define ACPI_WTAS_USA_UHB_MSK BIT(16)
#define ACPI_WTAS_USA_UHB_POS 16
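
For reference, a short sketch (not code from this commit) of how these
mask/position pairs unpack the single WTAS selection integer, mirroring
the decoding done in iwl_acpi_get_tas() above:

/* Sketch: decode the WTAS "TAS selection" dword using the masks above. */
static inline void example_decode_wtas(u32 tas_selection)
{
	bool enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
	u16 override_iec = (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >>
			   ACPI_WTAS_OVERRIDE_IEC_POS;
	u16 enable_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
			 ACPI_WTAS_ENABLE_IEC_POS;
	u8 usa_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >>
		     ACPI_WTAS_USA_UHB_POS;

	(void)enabled; (void)override_iec; (void)enable_iec; (void)usa_uhb;
}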
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V1) + 2)
@@ -105,6 +116,11 @@ struct iwl_geo_profile {
struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2];
};
/* Same thing as with SAR, all revisions fit in revision 2 */
struct iwl_ppag_chain {
s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
};
enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_QUERY = 0,
DSM_FUNC_DISABLE_SRD = 1,
@@ -112,7 +128,8 @@ enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_ENABLE_6E = 3,
DSM_FUNC_11AX_ENABLEMENT = 6,
DSM_FUNC_ENABLE_UNII4_CHAN = 7,
DSM_FUNC_ACTIVATE_CHANNEL = 8
DSM_FUNC_ACTIVATE_CHANNEL = 8,
DSM_FUNC_FORCE_DISABLE_CHANNELS = 9
};
enum iwl_dsm_values_srd {
@@ -198,8 +215,8 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset *table,
u32 n_bands, u32 n_profiles);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *block_list_array,
int *block_list_size);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
union iwl_tas_config_cmd *cmd, int fw_ver);
__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
@@ -280,8 +297,7 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
}
static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
__le32 *block_list_array,
int *block_list_size)
union iwl_tas_config_cmd *cmd, int fw_ver)
{
return -ENOENT;
}

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018, 2020 Intel Corporation
* Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -97,6 +97,21 @@ struct iwl_alive_ntf_v5 {
struct iwl_sku_id sku_id;
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_5 */
struct iwl_imr_alive_info {
__le64 base_addr;
__le32 size;
__le32 enabled;
} __packed; /* IMR_ALIVE_INFO_API_S_VER_1 */
struct iwl_alive_ntf_v6 {
__le16 status;
__le16 flags;
struct iwl_lmac_alive lmac_data[2];
struct iwl_umac_alive umac_data;
struct iwl_sku_id sku_id;
struct iwl_imr_alive_info imr;
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */
/**
* enum iwl_extended_cfg_flag - commands driver may send before
* finishing init flow
@@ -142,15 +157,6 @@ enum iwl_card_state_flags {
CARD_IS_RX_ON = 0x10,
};
/**
* struct iwl_radio_version_notif - information on the card state
* ( CARD_STATE_NOTIFICATION = 0xa1 )
* @flags: &enum iwl_card_state_flags
*/
struct iwl_card_state_notif {
__le32 flags;
} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
/**
* enum iwl_error_recovery_flags - flags for error recovery cmd
* @ERROR_RECOVERY_UPDATE_DB: update db from blob sent

View File

@@ -51,7 +51,7 @@ enum iwl_legacy_cmds {
* @UCODE_ALIVE_NTFY:
* Alive data from the firmware, as described in
* &struct iwl_alive_ntf_v3 or &struct iwl_alive_ntf_v4 or
* &struct iwl_alive_ntf_v5.
* &struct iwl_alive_ntf_v5 or &struct iwl_alive_ntf_v6.
*/
UCODE_ALIVE_NTFY = 0x1,
@@ -72,7 +72,8 @@
/**
* @PHY_CONTEXT_CMD:
* Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd.
* Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd
* or &struct iwl_phy_context_cmd_v1.
*/
PHY_CONTEXT_CMD = 0x8,
@@ -90,7 +91,8 @@ enum iwl_legacy_cmds {
/**
* @SCAN_CFG_CMD:
* uses &struct iwl_scan_config_v1 or &struct iwl_scan_config
* uses &struct iwl_scan_config_v1, &struct iwl_scan_config_v2
* or &struct iwl_scan_config
*/
SCAN_CFG_CMD = 0xc,
@@ -320,14 +322,6 @@ enum iwl_legacy_cmds {
*/
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
/**
* @DC2DC_CONFIG_CMD:
* Set/Get DC2DC frequency tune
* Command is &struct iwl_dc2dc_config_cmd,
* response is &struct iwl_dc2dc_config_resp
*/
DC2DC_CONFIG_CMD = 0x83,
/**
* @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
*/
@@ -356,7 +350,7 @@ enum iwl_legacy_cmds {
* &struct iwl_notif_statistics_v11,
* &struct iwl_notif_statistics_v10,
* &struct iwl_notif_statistics,
* &struct iwl_statistics_operational_ntfy
* &struct iwl_statistics_operational_ntfy_ver_14
*/
STATISTICS_CMD = 0x9c,
@@ -365,6 +359,7 @@ enum iwl_legacy_cmds {
* one of &struct iwl_notif_statistics_v10,
* &struct iwl_notif_statistics_v11,
* &struct iwl_notif_statistic,
* &struct iwl_statistics_operational_ntfy_ver_14
* &struct iwl_statistics_operational_ntfy
*/
STATISTICS_NOTIFICATION = 0x9d,
@@ -382,13 +377,6 @@ enum iwl_legacy_cmds {
*/
REDUCE_TX_POWER_CMD = 0x9f,
/**
* @CARD_STATE_NOTIFICATION:
* Card state (RF/CT kill) notification,
* uses &struct iwl_card_state_notif
*/
CARD_STATE_NOTIFICATION = 0xa1,
/**
* @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif
*/
@@ -612,6 +600,16 @@ enum iwl_system_subcmd_ids {
* @RFI_GET_FREQ_TABLE_CMD: &struct iwl_rfi_config_cmd
*/
RFI_GET_FREQ_TABLE_CMD = 0xc,
/**
* @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd
*/
SYSTEM_FEATURES_CONTROL_CMD = 0xd,
/**
* @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif
*/
RFI_DEACTIVATE_NOTIF = 0xff,
};
#endif /* __iwl_fw_api_commands_h__ */

View File

@@ -114,37 +114,4 @@ enum iwl_dc2dc_config_id {
DCDC_FREQ_TUNE_SET = 0x2,
}; /* MARKER_ID_API_E_VER_1 */
/**
* struct iwl_dc2dc_config_cmd - configure dc2dc values
*
* (DC2DC_CONFIG_CMD = 0x83)
*
* Set/Get & configure dc2dc values.
* The command always returns the current dc2dc values.
*
* @flags: set/get dc2dc
* @enable_low_power_mode: not used.
* @dc2dc_freq_tune0: frequency divider - digital domain
* @dc2dc_freq_tune1: frequency divider - analog domain
*/
struct iwl_dc2dc_config_cmd {
__le32 flags;
__le32 enable_low_power_mode; /* not used */
__le32 dc2dc_freq_tune0;
__le32 dc2dc_freq_tune1;
} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
/**
* struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
*
* Current dc2dc values returned by the FW.
*
* @dc2dc_freq_tune0: frequency divider - digital domain
* @dc2dc_freq_tune1: frequency divider - analog domain
*/
struct iwl_dc2dc_config_resp {
__le32 dc2dc_freq_tune0;
__le32 dc2dc_freq_tune1;
} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
#endif /* __iwl_fw_api_config_h__ */

View File

@@ -554,7 +554,7 @@ struct iwl_wowlan_gtk_status_v1 {
} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
/**
* struct iwl_wowlan_gtk_status - GTK status
* struct iwl_wowlan_gtk_status_v2 - GTK status
* @key: GTK material
* @key_len: GTK length, if set to 0, the key is not available
* @key_flags: information about the key:
@@ -565,7 +565,7 @@ struct iwl_wowlan_gtk_status_v1 {
* @tkip_mic_key: TKIP RX MIC key
* @rsc: TSC RSC counters
*/
struct iwl_wowlan_gtk_status {
struct iwl_wowlan_gtk_status_v2 {
u8 key[WOWLAN_KEY_MAX_SIZE];
u8 key_len;
u8 key_flags;
@@ -574,6 +574,41 @@ struct iwl_wowlan_gtk_status {
struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */
/**
* struct iwl_wowlan_all_rsc_tsc_v5 - key counters
* @ucast_rsc: unicast RSC values
* @mcast_rsc: multicast RSC values (per key map value)
* @sta_id: station ID
* @mcast_key_id_map: map of key id to @mcast_rsc entry
*/
struct iwl_wowlan_all_rsc_tsc_v5 {
__le64 ucast_rsc[IWL_MAX_TID_COUNT];
__le64 mcast_rsc[2][IWL_MAX_TID_COUNT];
__le32 sta_id;
u8 mcast_key_id_map[4];
} __packed; /* ALL_TSC_RSC_API_S_VER_5 */
/**
* struct iwl_wowlan_gtk_status_v3 - GTK status
* @key: GTK material
* @key_len: GTK length, if set to 0, the key is not available
* @key_flags: information about the key:
* bits[0:1]: key index assigned by the AP
* bits[2:6]: GTK index of the key in the internal DB
* bit[7]: Set iff this is the currently used GTK
* @reserved: padding
* @tkip_mic_key: TKIP RX MIC key
* @sc: RSC/TSC counters
*/
struct iwl_wowlan_gtk_status_v3 {
u8 key[WOWLAN_KEY_MAX_SIZE];
u8 key_len;
u8 key_flags;
u8 reserved[2];
u8 tkip_mic_key[IWL_MIC_KEY_SIZE];
struct iwl_wowlan_all_rsc_tsc_v5 sc;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_3 */
#define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1))
/**
@@ -640,7 +675,7 @@ struct iwl_wowlan_status_v6 {
* @wake_packet: wakeup packet
*/
struct iwl_wowlan_status_v7 {
struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
@@ -676,7 +711,7 @@ struct iwl_wowlan_status_v7 {
* @wake_packet: wakeup packet
*/
struct iwl_wowlan_status_v9 {
struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
@@ -693,6 +728,44 @@ struct iwl_wowlan_status_v9 {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */
/**
* struct iwl_wowlan_status_v12 - WoWLAN status
* @gtk: GTK data
* @igtk: IGTK data
* @replay_ctr: GTK rekey replay counter
* @pattern_number: number of the matched pattern
* @non_qos_seq_ctr: non-QoS sequence counter to use next.
* Reserved if the struct has version >= 10.
* @qos_seq_ctr: QoS sequence counters to use next
* @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
* @num_of_gtk_rekeys: number of GTK rekeys
* @transmitted_ndps: number of transmitted neighbor discovery packets
* @received_beacons: number of received beacons
* @wake_packet_length: wakeup packet length
* @wake_packet_bufsize: wakeup packet buffer size
* @tid_tear_down: bit mask of tids whose BA sessions were closed
* in suspend state
* @reserved: unused
* @wake_packet: wakeup packet
*/
struct iwl_wowlan_status_v12 {
struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 non_qos_seq_ctr;
__le16 qos_seq_ctr[8];
__le32 wakeup_reasons;
__le32 num_of_gtk_rekeys;
__le32 transmitted_ndps;
__le32 received_beacons;
__le32 wake_packet_length;
__le32 wake_packet_bufsize;
u8 tid_tear_down;
u8 reserved[3];
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_12 */
/* TODO: NetDetect API */
#endif /* __iwl_fw_api_d3_h__ */

View File

@@ -31,13 +31,18 @@ enum iwl_data_path_subcmd_ids {
*/
STA_HE_CTXT_CMD = 0x7,
/**
* @RLC_CONFIG_CMD: &struct iwl_rlc_config_cmd
*/
RLC_CONFIG_CMD = 0x8,
/**
* @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
*/
RFH_QUEUE_CONFIG_CMD = 0xD,
/**
* @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
* @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4
*/
TLC_MNG_CONFIG_CMD = 0xF,
@@ -52,6 +57,20 @@ enum iwl_data_path_subcmd_ids {
*/
CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
/**
* @RX_BAID_ALLOCATION_CONFIG_CMD: Allocate/deallocate a BAID for an RX
* blockack session, uses &struct iwl_rx_baid_cfg_cmd for the
* command, and &struct iwl_rx_baid_cfg_resp as a response.
*/
RX_BAID_ALLOCATION_CONFIG_CMD = 0x16,
/**
* @SCD_QUEUE_CONFIG_CMD: new scheduler queue allocation/config/removal
* command, uses &struct iwl_scd_queue_cfg_cmd and the response
* is (same as before) &struct iwl_tx_queue_cfg_rsp.
*/
SCD_QUEUE_CONFIG_CMD = 0x17,
/**
* @MONITOR_NOTIF: Datapath monitoring notification, using
* &struct iwl_datapath_monitor_notif
@@ -195,4 +214,193 @@ struct iwl_thermal_dual_chain_request {
__le32 event;
} __packed; /* THERMAL_DUAL_CHAIN_DISABLE_REQ_NTFY_API_S_VER_1 */
enum iwl_rlc_chain_info {
IWL_RLC_CHAIN_INFO_DRIVER_FORCE = BIT(0),
IWL_RLC_CHAIN_INFO_VALID = 0x000e,
IWL_RLC_CHAIN_INFO_FORCE = 0x0070,
IWL_RLC_CHAIN_INFO_FORCE_MIMO = 0x0380,
IWL_RLC_CHAIN_INFO_COUNT = 0x0c00,
IWL_RLC_CHAIN_INFO_MIMO_COUNT = 0x3000,
};
/**
* struct iwl_rlc_properties - RLC properties
* @rx_chain_info: RX chain info, &enum iwl_rlc_chain_info
* @reserved: reserved
*/
struct iwl_rlc_properties {
__le32 rx_chain_info;
__le32 reserved;
} __packed; /* RLC_PROPERTIES_S_VER_1 */
enum iwl_sad_mode {
IWL_SAD_MODE_ENABLED = BIT(0),
IWL_SAD_MODE_DEFAULT_ANT_MSK = 0x6,
IWL_SAD_MODE_DEFAULT_ANT_FW = 0x0,
IWL_SAD_MODE_DEFAULT_ANT_A = 0x2,
IWL_SAD_MODE_DEFAULT_ANT_B = 0x4,
};
/**
* struct iwl_sad_properties - SAD properties
* @chain_a_sad_mode: chain A SAD mode, &enum iwl_sad_mode
* @chain_b_sad_mode: chain B SAD mode, &enum iwl_sad_mode
* @mac_id: MAC index
* @reserved: reserved
*/
struct iwl_sad_properties {
__le32 chain_a_sad_mode;
__le32 chain_b_sad_mode;
__le32 mac_id;
__le32 reserved;
} __packed;
/**
* struct iwl_rlc_config_cmd - RLC configuration
* @phy_id: PHY index
* @rlc: RLC properties, &struct iwl_rlc_properties
* @sad: SAD (single antenna diversity) options, &struct iwl_sad_properties
* @flags: flags, &enum iwl_rlc_flags
* @reserved: reserved
*/
struct iwl_rlc_config_cmd {
__le32 phy_id;
struct iwl_rlc_properties rlc;
struct iwl_sad_properties sad;
u8 flags;
u8 reserved[3];
} __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */
#define IWL_MAX_BAID_OLD 16 /* MAX_IMMEDIATE_BA_API_D_VER_2 */
#define IWL_MAX_BAID 32 /* MAX_IMMEDIATE_BA_API_D_VER_3 */
/**
* enum iwl_rx_baid_action - BAID allocation/config action
* @IWL_RX_BAID_ACTION_ADD: add a new BAID session
* @IWL_RX_BAID_ACTION_MODIFY: modify the BAID session
* @IWL_RX_BAID_ACTION_REMOVE: remove the BAID session
*/
enum iwl_rx_baid_action {
IWL_RX_BAID_ACTION_ADD,
IWL_RX_BAID_ACTION_MODIFY,
IWL_RX_BAID_ACTION_REMOVE,
}; /* RX_BAID_ALLOCATION_ACTION_E_VER_1 */
/**
* struct iwl_rx_baid_cfg_cmd_alloc - BAID allocation data
* @sta_id_mask: station ID mask
* @tid: the TID for this session
* @reserved: reserved
* @ssn: the starting sequence number
* @win_size: RX BA session window size
*/
struct iwl_rx_baid_cfg_cmd_alloc {
__le32 sta_id_mask;
u8 tid;
u8 reserved[3];
__le16 ssn;
__le16 win_size;
} __packed; /* RX_BAID_ALLOCATION_ADD_CMD_API_S_VER_1 */
/**
* struct iwl_rx_baid_cfg_cmd_modify - BAID modification data
* @old_sta_id_mask: old station ID mask
* @new_sta_id_mask: new station ID mask
* @tid: TID of the BAID
*/
struct iwl_rx_baid_cfg_cmd_modify {
__le32 old_sta_id_mask;
__le32 new_sta_id_mask;
__le32 tid;
} __packed; /* RX_BAID_ALLOCATION_MODIFY_CMD_API_S_VER_2 */
/**
* struct iwl_rx_baid_cfg_cmd_remove_v1 - BAID removal data
* @baid: the BAID to remove
*/
struct iwl_rx_baid_cfg_cmd_remove_v1 {
__le32 baid;
} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_1 */
/**
* struct iwl_rx_baid_cfg_cmd_remove - BAID removal data
* @sta_id_mask: the station mask of the BAID to remove
* @tid: the TID of the BAID to remove
*/
struct iwl_rx_baid_cfg_cmd_remove {
__le32 sta_id_mask;
__le32 tid;
} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_2 */
/**
* struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
* @action: the action, from &enum iwl_rx_baid_action
*/
struct iwl_rx_baid_cfg_cmd {
__le32 action;
union {
struct iwl_rx_baid_cfg_cmd_alloc alloc;
struct iwl_rx_baid_cfg_cmd_modify modify;
struct iwl_rx_baid_cfg_cmd_remove_v1 remove_v1;
struct iwl_rx_baid_cfg_cmd_remove remove;
}; /* RX_BAID_ALLOCATION_OPERATION_API_U_VER_2 */
} __packed; /* RX_BAID_ALLOCATION_CONFIG_CMD_API_S_VER_2 */
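
A hypothetical usage sketch (the helper name and send path are
assumptions) showing how @action selects the union member when
allocating a BAID; the reply uses struct iwl_rx_baid_cfg_resp, defined
just below:

/* Sketch only: request a new BAID for an RX blockack session. */
static void example_fill_baid_alloc(struct iwl_rx_baid_cfg_cmd *cmd,
				    u32 sta_mask, u8 tid,
				    u16 ssn, u16 win_size)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD);
	cmd->alloc.sta_id_mask = cpu_to_le32(sta_mask);
	cmd->alloc.tid = tid;
	cmd->alloc.ssn = cpu_to_le16(ssn);
	cmd->alloc.win_size = cpu_to_le16(win_size);
	/* send RX_BAID_ALLOCATION_CONFIG_CMD; the reply carries the BAID */
}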
/**
* struct iwl_rx_baid_cfg_resp - BAID allocation response
* @baid: the allocated BAID
*/
struct iwl_rx_baid_cfg_resp {
__le32 baid;
}; /* RX_BAID_ALLOCATION_RESPONSE_API_S_VER_1 */
/**
* enum iwl_scd_queue_cfg_operation - scheduler queue operation
* @IWL_SCD_QUEUE_ADD: allocate a new queue
* @IWL_SCD_QUEUE_REMOVE: remove a queue
* @IWL_SCD_QUEUE_MODIFY: modify a queue
*/
enum iwl_scd_queue_cfg_operation {
IWL_SCD_QUEUE_ADD = 0,
IWL_SCD_QUEUE_REMOVE = 1,
IWL_SCD_QUEUE_MODIFY = 2,
};
/**
* struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
* @operation: the operation, see &enum iwl_scd_queue_cfg_operation
* @u.add.sta_mask: station mask
* @u.add.tid: TID
* @u.add.reserved: reserved
* @u.add.flags: flags from &enum iwl_tx_queue_cfg_actions, except
* %TX_QUEUE_CFG_ENABLE_QUEUE is not valid
* @u.add.cb_size: size code
* @u.add.bc_dram_addr: byte-count table IOVA
* @u.add.tfdq_dram_addr: TFD queue IOVA
* @u.remove.queue: queue ID for removal
* @u.modify.sta_mask: new station mask for modify
* @u.modify.queue: queue ID to modify
*/
struct iwl_scd_queue_cfg_cmd {
__le32 operation;
union {
struct {
__le32 sta_mask;
u8 tid;
u8 reserved[3];
__le32 flags;
__le32 cb_size;
__le64 bc_dram_addr;
__le64 tfdq_dram_addr;
} __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */
struct {
__le32 queue;
} __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */
struct {
__le32 sta_mask;
__le32 queue;
} __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */
} __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */
} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */
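
A hypothetical sketch (helper name assumed) of filling this command for
the ADD operation described above; the DMA addresses come from
driver-allocated byte-count and TFD queue memory:

/* Sketch only: prepare an SCD_QUEUE_CONFIG_CMD for queue allocation. */
static void example_fill_scd_add(struct iwl_scd_queue_cfg_cmd *cmd,
				 u32 sta_mask, u8 tid, u32 flags,
				 u32 cb_size, u64 bc_dram_addr,
				 u64 tfdq_dram_addr)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
	cmd->u.add.sta_mask = cpu_to_le32(sta_mask);
	cmd->u.add.tid = tid;
	cmd->u.add.flags = cpu_to_le32(flags);
	cmd->u.add.cb_size = cpu_to_le32(cb_size);
	cmd->u.add.bc_dram_addr = cpu_to_le64(bc_dram_addr);
	cmd->u.add.tfdq_dram_addr = cpu_to_le64(tfdq_dram_addr);
}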
#endif /* __iwl_fw_api_datapath_h__ */

View File

@@ -1,18 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2018-2021 Intel Corporation
* Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
#include <linux/bitops.h>
#define IWL_FW_INI_HW_SMEM_REGION_ID 15
#define IWL_FW_INI_MAX_REGION_ID 64
#define IWL_FW_INI_MAX_NAME 32
#define IWL_FW_INI_MAX_CFG_NAME 64
#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
#define IWL_FW_INI_REGION_V2_MASK 0x0000FFFF
#define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
#define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
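
The two GENMASK()s split the 32-bit region id word into an id (low 16
bits) and a dump policy (high 16 bits); a sketch of the split using
FIELD_GET() from linux/bitfield.h (illustration, not code from this
commit):

/* Sketch: extract both halves of a region TLV's id word. */
static inline u32 example_region_id(__le32 id_word)
{
	return FIELD_GET(IWL_FW_INI_REGION_ID_MASK, le32_to_cpu(id_word));
}

static inline u32 example_region_dump_policy(__le32 id_word)
{
	return FIELD_GET(IWL_FW_INI_REGION_DUMP_POLICY_MASK,
			 le32_to_cpu(id_word));
}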
/**
* struct iwl_fw_ini_hcmd
@@ -124,6 +124,9 @@ struct iwl_fw_ini_region_internal_buffer {
* @hdr: debug header
* @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID
* @type: region type. One of &enum iwl_fw_ini_region_type
* @sub_type: region sub type
* @sub_type_ver: region sub type version
* @reserved: not in use
* @name: region name
* @dev_addr: device address configuration. Used by
* &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC,
@@ -146,7 +149,10 @@ struct iwl_fw_ini_region_internal_buffer {
struct iwl_fw_ini_region_tlv {
struct iwl_fw_ini_header hdr;
__le32 id;
__le32 type;
u8 type;
u8 sub_type;
u8 sub_type_ver;
u8 reserved;
u8 name[IWL_FW_INI_MAX_NAME];
union {
struct iwl_fw_ini_region_dev_addr dev_addr;
@@ -244,11 +250,10 @@ struct iwl_fw_ini_hcmd_tlv {
} __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
/**
* struct iwl_fw_ini_conf_tlv - preset configuration TLV
* struct iwl_fw_ini_addr_val - Address and value to set it to
*
* @address: the base address
* @value: value to set at address
*/
struct iwl_fw_ini_addr_val {
__le32 address;
@@ -306,6 +311,7 @@ enum iwl_fw_ini_config_set_type {
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC4: allocation meant for DBGC4 configuration
* @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids
*/
enum iwl_fw_ini_allocation_id {
@@ -313,6 +319,7 @@ enum iwl_fw_ini_allocation_id {
IWL_FW_INI_ALLOCATION_ID_DBGC1,
IWL_FW_INI_ALLOCATION_ID_DBGC2,
IWL_FW_INI_ALLOCATION_ID_DBGC3,
IWL_FW_INI_ALLOCATION_ID_DBGC4,
IWL_FW_INI_ALLOCATION_NUM,
}; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */
@@ -379,6 +386,17 @@ enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_NUM
}; /* FW_TLV_DEBUG_REGION_TYPE_API_E */
enum iwl_fw_ini_region_device_memory_subtype {
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM = 1,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE = 5,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE = 7,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE = 10,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE = 14,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE = 16,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE = 18,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE = 20,
}; /* FW_TLV_DEBUG_REGION_DEVICE_MEMORY_SUBTYPE_API_E */
/**
* enum iwl_fw_ini_time_point
*
@@ -457,6 +475,7 @@ enum iwl_fw_ini_time_point {
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
* Append otherwise
* @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
*/
enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
@@ -464,5 +483,46 @@ enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16),
};
/**
* enum iwl_fw_ini_trigger_reset_fw_policy - Determines how to handle reset
*
* @IWL_FW_INI_RESET_FW_MODE_NOTHING: do not stop the FW or reload it (default)
* @IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY: stop the FW without reloading it
* @IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW: stop the FW and then reload it
*/
enum iwl_fw_ini_trigger_reset_fw_policy {
IWL_FW_INI_RESET_FW_MODE_NOTHING = 0,
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY,
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW
};
/**
* enum iwl_fw_ini_dump_policy - Determines how to handle dump based on enabled flags
*
* @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: OS has no limit of dump size
* @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump only 600KB region dump
* @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump 5MB size dump
*/
enum iwl_fw_ini_dump_policy {
IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0),
IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1),
IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2),
};
/**
* enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW.
*
* @IWL_FW_INI_DUMP_BRIEF: only dump the most important regions
* @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions
* @IWL_FW_INI_DUMP_VERBOSE: dump all regions
*/
enum iwl_fw_ini_dump_type {
IWL_FW_INI_DUMP_BRIEF,
IWL_FW_INI_DUMP_MEDIUM,
IWL_FW_INI_DUMP_VERBOSE,
};
#endif

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -42,6 +42,12 @@ enum iwl_debug_cmds {
* &struct iwl_buf_alloc_cmd
*/
BUFFER_ALLOCATION = 0x8,
/**
* @FW_DUMP_COMPLETE_CMD:
* sends command to fw once dump collection completed
* &struct iwl_dbg_dump_complete_cmd
*/
FW_DUMP_COMPLETE_CMD = 0xB,
/**
* @MFU_ASSERT_DUMP_NTF:
* &struct iwl_mfu_assert_dump_notif
@@ -404,4 +410,15 @@ struct iwl_dbg_host_event_cfg_cmd {
__le32 enabled_severities;
} __packed; /* DEBUG_HOST_EVENT_CFG_CMD_API_S_VER_1 */
/**
* struct iwl_dbg_dump_complete_cmd - dump complete cmd
*
* @tp: timepoint whose dump has completed
* @tp_data: timepoint data
*/
struct iwl_dbg_dump_complete_cmd {
__le32 tp;
__le32 tp_data;
} __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */
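
A hypothetical sketch of filling this command once a dump has been
collected (the surrounding send logic is assumed, not shown in this
header):

/* Sketch only: report dump completion for a given timepoint. */
static void example_fill_dump_complete(struct iwl_dbg_dump_complete_cmd *cmd,
				       u32 timepoint, u32 timepoint_data)
{
	cmd->tp = cpu_to_le32(timepoint);
	cmd->tp_data = cpu_to_le32(timepoint_data);
}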
#endif /* __iwl_fw_api_debug_h__ */

View File

@@ -27,6 +27,10 @@ enum iwl_mac_conf_subcmd_ids {
* @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
*/
SESSION_PROTECTION_CMD = 0x5,
/**
* @CANCEL_CHANNEL_SWITCH_CMD: &struct iwl_cancel_channel_switch_cmd
*/
CANCEL_CHANNEL_SWITCH_CMD = 0x6,
/**
* @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
@@ -42,6 +46,11 @@ enum iwl_mac_conf_subcmd_ids {
* @CHANNEL_SWITCH_START_NOTIF: &struct iwl_channel_switch_start_notif
*/
CHANNEL_SWITCH_START_NOTIF = 0xFF,
/**
*@CHANNEL_SWITCH_ERROR_NOTIF: &struct iwl_channel_switch_error_notif
*/
CHANNEL_SWITCH_ERROR_NOTIF = 0xF9,
};
#define IWL_P2P_NOA_DESC_COUNT (2)
@@ -110,6 +119,31 @@ struct iwl_channel_switch_start_notif {
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
#define CS_ERR_COUNT_ERROR BIT(0)
#define CS_ERR_LONG_DELAY_AFTER_CS BIT(1)
#define CS_ERR_LONG_TX_BLOCK BIT(2)
#define CS_ERR_TX_BLOCK_TIMER_EXPIRED BIT(3)
/**
* struct iwl_channel_switch_error_notif - Channel switch error notification
*
* @mac_id: the MAC for which the ucode sends the notification
* @csa_err_mask: mask of channel switch error that can occur
*/
struct iwl_channel_switch_error_notif {
__le32 mac_id;
__le32 csa_err_mask;
} __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_1 */
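
For illustration, a receiver of this notification might decode the mask bit by bit; a hedged sketch (handle_cs_error() is hypothetical and not part of this import):

static void handle_cs_error(const struct iwl_channel_switch_error_notif *notif)
{
	u32 mask = le32_to_cpu(notif->csa_err_mask);

	/* several error bits may be set at once */
	if (mask & CS_ERR_COUNT_ERROR)
		pr_debug("CSA count error on mac %u\n",
			 le32_to_cpu(notif->mac_id));
	if (mask & (CS_ERR_LONG_TX_BLOCK | CS_ERR_TX_BLOCK_TIMER_EXPIRED))
		pr_debug("TX blocked too long around the switch\n");
}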
/**
* struct iwl_cancel_channel_switch_cmd - Cancel Channel Switch command
*
* @mac_id: the mac that should cancel the channel switch
*/
struct iwl_cancel_channel_switch_cmd {
__le32 mac_id;
} __packed; /* MAC_CANCEL_CHANNEL_SWITCH_S_VER_1 */
/**
* struct iwl_chan_switch_te_cmd - Channel Switch Time Event command
*

View File

@ -413,10 +413,11 @@ enum iwl_he_pkt_ext_constellations {
};
#define MAX_HE_SUPP_NSS 2
#define MAX_HE_CHANNEL_BW_INDX 4
#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4
#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5
/**
* struct iwl_he_pkt_ext - QAM thresholds
* struct iwl_he_pkt_ext_v1 - QAM thresholds
* The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
* The IE is organized in the following way:
* Support for Nss x BW (or RU) matrix:
@ -435,9 +436,34 @@ enum iwl_he_pkt_ext_constellations {
* Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext {
u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
} __packed; /* PKT_EXT_DOT11AX_API_S */
struct iwl_he_pkt_ext_v1 {
u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
/**
* struct iwl_he_pkt_ext_v2 - QAM thresholds
* The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
* The IE is organized in the following way:
* Support for Nss x BW (or RU) matrix:
* (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
* Each entry contains 2 QAM thresholds for 8us and 16us:
* 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
* i.e. QAM_th1 < QAM_th2, such that if TX uses QAM_tx:
* QAM_tx < QAM_th1 --> PPE=0us
* QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
* QAM_th2 <= QAM_tx --> PPE=16us
* @pkt_ext_qam_th: QAM thresholds
* For each Nss/Bw define 2 QAM thresholds (0..5)
* For rates below the low_th, no need for PPE
* For rates between low_th and high_th, need 8us PPE
* For rates equal to or higher than the high_th, need 16us PPE
* Nss (0-siso, 1-mimo2) x
* BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz, 4-320MHz) x
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext_v2 {
u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
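
To make the threshold scheme concrete, a sketch of the lookup the comment describes (he_ppe_us() is a hypothetical helper; the real values come from the PPE thresholds element):

static u8 he_ppe_us(const struct iwl_he_pkt_ext_v2 *pe, int nss, int bw,
		    u8 qam_tx)
{
	u8 low_th = pe->pkt_ext_qam_th[nss][bw][0];
	u8 high_th = pe->pkt_ext_qam_th[nss][bw][1];

	if (qam_tx < low_th)
		return 0;	/* no PPE needed */
	if (qam_tx < high_th)
		return 8;	/* 8us PPE */
	return 16;		/* 16us PPE */
}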
/**
* enum iwl_he_sta_ctxt_flags - HE STA context flags
@ -464,6 +490,11 @@ struct iwl_he_pkt_ext {
* @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are
* not allowed (as there are OBSS that might classify such transmissions as
* radar pulses).
* @STA_CTXT_HE_NDP_FEEDBACK_ENABLED: mark support for NDP feedback and change
* of threshold
* @STA_CTXT_EHT_PUNCTURE_MASK_VALID: indicates the puncture_mask field is valid
* @STA_CTXT_EHT_LONG_PPE_ENABLED: indicates the PPE requirement should be
* extended to 20us for BW > 160MHz or for MCS with 4096-QAM.
*/
enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
@ -477,6 +508,9 @@ enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_MU_EDCA_CW = BIT(12),
STA_CTXT_HE_NIC_NOT_ACK_ENABLED = BIT(13),
STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14),
STA_CTXT_HE_NDP_FEEDBACK_ENABLED = BIT(15),
STA_CTXT_EHT_PUNCTURE_MASK_VALID = BIT(16),
STA_CTXT_EHT_LONG_PPE_ENABLED = BIT(17),
};
/**
@ -551,7 +585,7 @@ struct iwl_he_sta_context_cmd_v1 {
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
struct iwl_he_pkt_ext pkt_ext;
struct iwl_he_pkt_ext_v1 pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
@ -568,7 +602,7 @@ struct iwl_he_sta_context_cmd_v1 {
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */
/**
* struct iwl_he_sta_context_cmd - configure FW to work with HE AP
* struct iwl_he_sta_context_cmd_v2 - configure FW to work with HE AP
* @sta_id: STA id
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
* 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
@ -599,7 +633,7 @@ struct iwl_he_sta_context_cmd_v1 {
* @bssid_count: actual number of VAPs in the MultiBSS Set
* @reserved4: alignment
*/
struct iwl_he_sta_context_cmd {
struct iwl_he_sta_context_cmd_v2 {
u8 sta_id;
u8 tid_limit;
u8 reserved1;
@ -619,7 +653,7 @@ struct iwl_he_sta_context_cmd {
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
struct iwl_he_pkt_ext pkt_ext;
struct iwl_he_pkt_ext_v1 pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
@ -642,6 +676,81 @@ struct iwl_he_sta_context_cmd {
u8 reserved4[3];
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
/**
* struct iwl_he_sta_context_cmd_v3 - configure FW to work with HE AP
* @sta_id: STA id
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
* 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
* @reserved1: reserved byte for future use
* @reserved2: reserved byte for future use
* @flags: see %iwl_11ax_sta_ctxt_flags
* @ref_bssid_addr: reference BSSID used by the AP
* @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
* @htc_flags: which features are supported in HTC
* @frag_flags: frag support in A-MSDU
* @frag_level: frag support level
* @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
* @frag_min_size: min frag size (except last frag)
* @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
* @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
* @htc_trig_based_pkt_ext: default PE in 4us units
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
* @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
* @puncture_mask: puncture mask for EHT
* @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
* @max_bssid_indicator: indicator of the max bssid supported on the associated
* bss
* @bssid_index: index of the associated VAP
* @ema_ap: AP supports enhanced Multi BSSID advertisement
* @profile_periodicity: number of Beacon periods that are needed to receive the
* complete VAPs info
* @bssid_count: actual number of VAPs in the MultiBSS Set
* @reserved4: alignment
*/
struct iwl_he_sta_context_cmd_v3 {
u8 sta_id;
u8 tid_limit;
u8 reserved1;
u8 reserved2;
__le32 flags;
/* The below fields are set via Multiple BSSID IE */
u8 ref_bssid_addr[6];
__le16 reserved0;
/* The below fields are set via HE-capabilities IE */
__le32 htc_flags;
u8 frag_flags;
u8 frag_level;
u8 frag_max_num;
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
struct iwl_he_pkt_ext_v2 pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
u8 htc_trig_based_pkt_ext;
__le16 frame_time_rts_th;
/* Random access parameter set (i.e. RAPS) */
u8 rand_alloc_ecwmin;
u8 rand_alloc_ecwmax;
__le16 puncture_mask;
/* The below fields are set via MU EDCA parameter set element */
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
u8 max_bssid_indicator;
u8 bssid_index;
u8 ema_ap;
u8 profile_periodicity;
u8 bssid_count;
u8 reserved4[3];
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
/**
* struct iwl_he_monitor_cmd - configure air sniffer for HE
* @bssid: the BSSID to sniff for

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -33,6 +33,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
*/
TAS_CONFIG = 0x3,
/**
* @SAR_OFFSET_MAPPING_TABLE_CMD: &struct iwl_sar_offset_mapping_cmd
*/
SAR_OFFSET_MAPPING_TABLE_CMD = 0x4,
/**
* @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy
*/
@ -388,17 +393,56 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
#define IWL_TAS_BLACK_LIST_MAX 16
#define IWL_TAS_BLOCK_LIST_MAX 16
/**
* struct iwl_tas_config_cmd - configures the TAS
* struct iwl_tas_config_cmd_v2 - configures the TAS
* @block_list_size: size of relevant field in block_list_array
* @block_list_array: block list countries (without TAS)
* @block_list_array: list of countries where TAS must be disabled
*/
struct iwl_tas_config_cmd {
struct iwl_tas_config_cmd_v2 {
__le32 block_list_size;
__le32 block_list_array[IWL_TAS_BLACK_LIST_MAX];
__le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
/**
* struct iwl_tas_config_cmd_v3 - configures the TAS
* @block_list_size: size of relevant field in block_list_array
* @block_list_array: list of countries where TAS must be disabled
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
*/
struct iwl_tas_config_cmd_v3 {
__le32 block_list_size;
__le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
__le16 override_tas_iec;
__le16 enable_tas_iec;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
* struct iwl_tas_config_cmd_v4 - configures the TAS
* @block_list_size: size of relevant field in block_list_array
* @block_list_array: list of countries where TAS must be disabled
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
* @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA
* @reserved: reserved
*/
struct iwl_tas_config_cmd_v4 {
__le32 block_list_size;
__le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
u8 override_tas_iec;
u8 enable_tas_iec;
u8 usa_tas_uhb_allowed;
u8 reserved;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
union iwl_tas_config_cmd {
struct iwl_tas_config_cmd_v2 v2;
struct iwl_tas_config_cmd_v3 v3;
struct iwl_tas_config_cmd_v4 v4;
};
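
The union lets one buffer carry whichever layout the firmware expects; the payload length sent with the host command then has to match the chosen version. A minimal sketch, assuming cmd_ver comes from the firmware's advertised command version (e.g. via iwl_fw_lookup_cmd_ver(); tas_cmd_len() is hypothetical):

static size_t tas_cmd_len(int cmd_ver)
{
	switch (cmd_ver) {
	case 2:
		return sizeof(struct iwl_tas_config_cmd_v2);
	case 3:
		return sizeof(struct iwl_tas_config_cmd_v3);
	default:
		return sizeof(struct iwl_tas_config_cmd_v4);
	}
}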
/**
* enum iwl_lari_configs - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
@ -494,6 +538,32 @@ struct iwl_lari_config_change_cmd_v5 {
__le32 chan_state_active_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */
/**
* struct iwl_lari_config_change_cmd_v6 - change LARI configuration
* @config_bitmap: Bitmap of the config commands. Each bit will trigger a
* different predefined FW config operation.
* @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
* @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
* per country, one to indicate whether to override and the other to
* indicate the value to use.
* @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
* per country, one to indicate whether to override and the other to
* indicate allow/disallow unii4 channels.
* @chan_state_active_bitmap: Bitmap for overriding channel state to active.
* Each bit represents a country or region to activate, according to the BIOS
* definitions.
* @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
* Each bit represents a set of channels in a specific band that should be disabled.
*/
struct iwl_lari_config_change_cmd_v6 {
__le32 config_bitmap;
__le32 oem_uhb_allow_bitmap;
__le32 oem_11ax_allow_bitmap;
__le32 oem_unii4_allow_bitmap;
__le32 chan_state_active_bitmap;
__le32 force_disable_channels_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */
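
A hedged sketch of populating the v6 command with one of the LARI config bits named above (the plumbing to the host-command layer is omitted; lari_example() is hypothetical):

static void lari_example(struct iwl_lari_config_change_cmd_v6 *cmd)
{
	/* each set bit triggers one predefined FW config operation */
	cmd->config_bitmap =
		cpu_to_le32(LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK);
}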
/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018, 2020 Intel Corporation
* Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -150,11 +150,12 @@ struct iwl_phy_context_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
/* PHY_CONTEXT_DATA_API_S_VER_3 */
/* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */
struct iwl_fw_channel_info ci;
__le32 lmac_id;
__le32 rxchain_info;
__le32 rxchain_info; /* reserved in _VER_4 */
__le32 dsp_cfg_flags;
__le32 reserved;
} __packed; /* PHY_CONTEXT_CMD_API_VER_3 */
} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */
#endif /* __iwl_fw_api_phy_ctxt_h__ */

View File

@ -166,14 +166,24 @@ struct iwl_dts_measurement_resp {
/**
* struct ct_kill_notif - CT-kill entry notification
* This structure represents both versions of this notification.
*
* @temperature: the current temperature in Celsius
* @reserved: reserved
* @dts: only in v2: bitmap of the DTSs that triggered the CT Kill:
* bit 0: ToP master
* bit 1: PA chain A master
* bit 2: PA chain B master
* bit 3: ToP slave
* bit 4: PA chain A slave
* bit 5: PA chain B slave
* bits 6,7: reserved (set to 0)
* @scheme: only for v2: scheme that triggered the CT Kill (0-SW, 1-HW)
*/
struct ct_kill_notif {
__le16 temperature;
__le16 reserved;
} __packed; /* GRP_PHY_CT_KILL_NTF */
u8 dts;
u8 scheme;
} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
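
Because both versions share the leading fields, a receiver can key off the notification length; a hedged sketch (handle_ct_kill() is hypothetical, not the driver's actual handler):

static void handle_ct_kill(const struct ct_kill_notif *notif, u32 notif_len)
{
	pr_info("CT-kill at %d C\n", (s16)le16_to_cpu(notif->temperature));

	/* v1 ends after 'reserved'; v2 appends 'dts' and 'scheme' */
	if (notif_len >= sizeof(*notif))
		pr_info("dts bitmap 0x%x, %s triggered\n", notif->dts,
			notif->scheme ? "HW" : "SW");
}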
/**
* enum ctdp_cmd_operation - CTDP command operations

View File

@ -419,7 +419,7 @@ struct iwl_geo_tx_power_profiles_cmd_v1 {
* struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: BIOS table revision.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
*/
struct iwl_geo_tx_power_profiles_cmd_v2 {
__le32 ops;
@ -431,7 +431,7 @@ struct iwl_geo_tx_power_profiles_cmd_v2 {
* struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: BIOS table revision.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
*/
struct iwl_geo_tx_power_profiles_cmd_v3 {
__le32 ops;
@ -443,7 +443,7 @@ struct iwl_geo_tx_power_profiles_cmd_v3 {
* struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: BIOS table revision.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
*/
struct iwl_geo_tx_power_profiles_cmd_v4 {
__le32 ops;
@ -455,7 +455,7 @@ struct iwl_geo_tx_power_profiles_cmd_v4 {
* struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: BIOS table revision.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
*/
struct iwl_geo_tx_power_profiles_cmd_v5 {
__le32 ops;
@ -503,6 +503,20 @@ union iwl_ppag_table_cmd {
} v2;
} __packed;
#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
/**
* struct iwl_sar_offset_mapping_cmd - struct for SAR_OFFSET_MAPPING_TABLE_CMD
* @offset_map: mapping a mcc to a geo sar group
* @reserved: reserved
*/
struct iwl_sar_offset_mapping_cmd {
u8 offset_map[MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE]
[MCC_TO_SAR_OFFSET_TABLE_COL_SIZE];
u16 reserved;
} __packed; /*SAR_OFFSET_MAPPING_TABLE_CMD_API_S*/
/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2020 Intel Corporation
* Copyright (C) 2020-2021 Intel Corporation
*/
#ifndef __iwl_fw_api_rfi_h__
#define __iwl_fw_api_rfi_h__
@ -57,4 +57,12 @@ struct iwl_rfi_freq_table_resp_cmd {
__le32 status;
} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
/**
* struct iwl_rfi_deactivate_notif - notification that FW disabled RFIm
*
* @reason: used only for a log message
*/
struct iwl_rfi_deactivate_notif {
__le32 reason;
} __packed; /* RFI_DEACTIVATE_NTF_S_VER_1 */
#endif /* __iwl_fw_api_rfi_h__ */

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_rs_h__
@ -116,13 +116,24 @@ enum IWL_TLC_MNG_NSS {
IWL_TLC_NSS_MAX
};
enum IWL_TLC_HT_BW_RATES {
IWL_TLC_HT_BW_NONE_160,
IWL_TLC_HT_BW_160,
/**
* enum IWL_TLC_MCS_PER_BW - MCS index per BW
* @IWL_TLC_MCS_PER_BW_80: MCS for BW - 20MHz, 40MHz, 80MHz
* @IWL_TLC_MCS_PER_BW_160: MCS for BW - 160MHz
* @IWL_TLC_MCS_PER_BW_320: MCS for BW - 320MHz
* @IWL_TLC_MCS_PER_BW_NUM_V3: number of entries up to version 3
* @IWL_TLC_MCS_PER_BW_NUM_V4: number of entries from version 4
*/
enum IWL_TLC_MCS_PER_BW {
IWL_TLC_MCS_PER_BW_80,
IWL_TLC_MCS_PER_BW_160,
IWL_TLC_MCS_PER_BW_320,
IWL_TLC_MCS_PER_BW_NUM_V3 = IWL_TLC_MCS_PER_BW_160 + 1,
IWL_TLC_MCS_PER_BW_NUM_V4 = IWL_TLC_MCS_PER_BW_320 + 1,
};
/**
* struct tlc_config_cmd - TLC configuration
* struct iwl_tlc_config_cmd_v3 - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw
@ -131,8 +142,8 @@ enum IWL_TLC_HT_BW_RATES {
* @amsdu: TX amsdu is supported
* @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
* @non_ht_rates: bitmap of supported legacy rates
* @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
* pair (0 - 80MHz width and below, 1 - 160MHz).
* @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per &enum IWL_TLC_MCS_PER_BW
* <nss, channel-width> pair (0 - 80MHz width and below, 1 - 160MHz).
* @max_mpdu_len: max MPDU length, in bytes
* @sgi_ch_width_supp: bitmap of SGI support per channel width
* use BIT(@enum iwl_tlc_mng_cfg_cw)
@ -140,7 +151,7 @@ enum IWL_TLC_HT_BW_RATES {
* @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
* set zero for no limit.
*/
struct iwl_tlc_config_cmd {
struct iwl_tlc_config_cmd_v3 {
u8 sta_id;
u8 reserved1[3];
u8 max_ch_width;
@ -149,13 +160,44 @@ struct iwl_tlc_config_cmd {
u8 amsdu;
__le16 flags;
__le16 non_ht_rates;
__le16 ht_rates[IWL_TLC_NSS_MAX][2];
__le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V3];
__le16 max_mpdu_len;
u8 sgi_ch_width_supp;
u8 reserved2;
__le32 max_tx_op;
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
/**
* struct iwl_tlc_config_cmd_v4 - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
* @mode: &enum iwl_tlc_mng_cfg_mode
* @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
* @sgi_ch_width_supp: bitmap of SGI support per channel width
* use BIT(&enum iwl_tlc_mng_cfg_cw)
* @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
* @non_ht_rates: bitmap of supported legacy rates
* @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
* pair (0 - 80MHz width and below, 1 - 160MHz, 2 - 320MHz).
* @max_mpdu_len: max MPDU length, in bytes
* @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
* set zero for no limit.
*/
struct iwl_tlc_config_cmd_v4 {
u8 sta_id;
u8 reserved1[3];
u8 max_ch_width;
u8 mode;
u8 chains;
u8 sgi_ch_width_supp;
__le16 flags;
__le16 non_ht_rates;
__le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V4];
__le16 max_mpdu_len;
__le16 max_tx_op;
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */
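
The widened second index is the functional change from v3; a sketch of filling the v4 rate bitmaps (the bitmap values are placeholders, not recommended settings):

static void tlc_fill_example(struct iwl_tlc_config_cmd_v4 *cfg)
{
	/* MCS bitmap per <nss, bandwidth group> */
	cfg->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
		cpu_to_le16(0x03ff);
	cfg->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_160] =
		cpu_to_le16(0x03ff);
	cfg->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_320] =
		cpu_to_le16(0);
}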
/**
* enum iwl_tlc_update_flags - updated fields
* @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update

View File

@ -756,7 +756,11 @@ struct iwl_rxq_sync_cmd {
__le32 flags;
__le32 rxq_mask;
__le32 count;
#if defined(__linux__)
u8 payload[];
#elif defined(__FreeBSD__)
u8 payload[0];
#endif
} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
/**
@ -768,7 +772,11 @@ struct iwl_rxq_sync_cmd {
*/
struct iwl_rxq_sync_notification {
__le32 count;
#if defined(__linux__)
u8 payload[];
#elif defined(__FreeBSD__)
u8 payload[0];
#endif
} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
/**
@ -846,7 +854,11 @@ struct iwl_rfh_queue_data {
struct iwl_rfh_queue_config {
u8 num_queues;
u8 reserved[3];
#if defined(__linux__)
struct iwl_rfh_queue_data data[];
#elif defined(__FreeBSD__)
struct iwl_rfh_queue_data data[0];
#endif
} __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */
#endif /* __iwl_fw_api_rx_h__ */

View File

@ -82,6 +82,16 @@ enum iwl_scan_offload_band_selection {
IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc,
};
enum iwl_scan_offload_auth_alg {
IWL_AUTH_ALGO_UNSUPPORTED = 0x00,
IWL_AUTH_ALGO_NONE = 0x01,
IWL_AUTH_ALGO_PSK = 0x02,
IWL_AUTH_ALGO_8021X = 0x04,
IWL_AUTH_ALGO_SAE = 0x08,
IWL_AUTH_ALGO_8021X_SHA384 = 0x10,
IWL_AUTH_ALGO_OWE = 0x20,
};
/**
* struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
* @ssid_index: index to ssid list in fixed part
@ -201,7 +211,7 @@ struct iwl_scan_channel_cfg_lmac {
__le32 iter_interval;
} __packed;
/*
/**
* struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
* @offset: offset in the data block
* @len: length of the segment
@ -211,7 +221,8 @@ struct iwl_scan_probe_segment {
__le16 len;
} __packed;
/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
/**
* struct iwl_scan_probe_req_v1 - PROBE_REQUEST_FRAME_API_S_VER_2
* @mac_header: first (and common) part of the probe
* @band_data: band specific data
* @common_data: last (and common) part of the probe
@ -224,7 +235,8 @@ struct iwl_scan_probe_req_v1 {
u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE];
} __packed;
/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2
/**
* struct iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2
* @mac_header: first (and common) part of the probe
* @band_data: band specific data
* @common_data: last (and common) part of the probe
@ -247,7 +259,8 @@ enum iwl_scan_channel_flags {
IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER = BIT(6),
};
/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
/**
* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
* @flags: enum iwl_scan_channel_flags
* @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
* involved.
@ -492,7 +505,7 @@ struct iwl_scan_dwell {
} __packed;
/**
* struct iwl_scan_config_v1
* struct iwl_scan_config_v1 - scan configuration command
* @flags: enum scan_config_flags
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
@ -524,6 +537,21 @@ struct iwl_scan_config_v1 {
#define SCAN_LB_LMAC_IDX 0
#define SCAN_HB_LMAC_IDX 1
/**
* struct iwl_scan_config_v2 - scan configuration command
* @flags: enum scan_config_flags
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
* @legacy_rates: default legacy rates - enum scan_config_rates
* @out_of_channel_time: default max out of serving channel time
* @suspend_time: default max suspend time
* @dwell: dwells for the scan
* @mac_addr: default mac address to be used in probes
* @bcast_sta_id: the index of the station in the fw
* @channel_flags: default channel flags - enum iwl_channel_flags
* scan_config_channel_flag
* @channel_array: default supported channels
*/
struct iwl_scan_config_v2 {
__le32 flags;
__le32 tx_chains;
@ -539,7 +567,7 @@ struct iwl_scan_config_v2 {
} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
/**
* struct iwl_scan_config
* struct iwl_scan_config - scan configuration command
* @enable_cam_mode: whether to enable CAM mode.
* @enable_promiscouos_mode: whether to enable promiscuous mode
* @bcast_sta_id: the index of the station in the fw. Deprecated starting with
@ -640,6 +668,10 @@ enum iwl_umac_scan_general_flags2 {
* @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN: in case
* &IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN is enabled and scan is
* activated over 6GHz PSC channels, filter in beacons and probe responses.
* @IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE: if set, send probe requests at a minimum
* rate of 5.5Mbps, filter in broadcast probe responses and set the max
* channel time indication field in the FILS request parameters element
* (if included by the driver in the probe request IEs).
*/
enum iwl_umac_scan_general_flags_v2 {
IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0),
@ -657,6 +689,20 @@ enum iwl_umac_scan_general_flags_v2 {
IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = BIT(12),
IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN = BIT(13),
IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN = BIT(14),
IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE = BIT(15),
};
/**
* enum iwl_umac_scan_general_params_flags2 - UMAC scan general flags2
*
* @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB: scan event scheduling
* should be aware of a P2P GO operation on the 2GHz band.
* @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB: scan event scheduling
* should be aware of a P2P GO operation on the 5GHz or 6GHz band.
*/
enum iwl_umac_scan_general_params_flags2 {
IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB = BIT(0),
IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB = BIT(1),
};
/**
@ -941,8 +987,8 @@ struct iwl_scan_channel_params_v6 {
} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */
/**
* struct iwl_scan_general_params_v10
* @flags: &enum iwl_umac_scan_flags
* struct iwl_scan_general_params_v11
* @flags: &enum iwl_umac_scan_general_flags_v2
* @reserved: reserved for future
* @scan_start_mac_id: report the scan start TSF time according to this mac TSF
* @active_dwell: dwell time for active scan per LMAC
@ -952,7 +998,8 @@ struct iwl_scan_channel_params_v6 {
* for 5GHz channels
* @adwell_default_social_chn: adaptive dwell default number of
* APs per social channel
* @reserved1: reserved for future
* @flags2: for version 11 see &enum iwl_umac_scan_general_params_flags2.
* Otherwise reserved.
* @adwell_max_budget: the maximal number of TUs that adaptive dwell
* can add to the total scan time
* @max_out_of_time: max out of serving channel time, per LMAC
@ -963,7 +1010,7 @@ struct iwl_scan_channel_params_v6 {
* @num_of_fragments: number of fragments needed for full fragmented
* scan coverage.
*/
struct iwl_scan_general_params_v10 {
struct iwl_scan_general_params_v11 {
__le16 flags;
u8 reserved;
u8 scan_start_mac_id;
@ -971,14 +1018,14 @@ struct iwl_scan_general_params_v10 {
u8 adwell_default_2g;
u8 adwell_default_5g;
u8 adwell_default_social_chn;
u8 reserved1;
u8 flags2;
__le16 adwell_max_budget;
__le32 max_out_of_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
__le32 scan_priority;
u8 passive_dwell[SCAN_TWO_LMACS];
u8 num_of_fragments[SCAN_TWO_LMACS];
} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_10 */
} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_11 and *_VER_10 */
/**
* struct iwl_scan_periodic_parms_v1
@ -994,31 +1041,31 @@ struct iwl_scan_periodic_parms_v1 {
/**
* struct iwl_scan_req_params_v12
* @general_params: &struct iwl_scan_general_params_v10
* @general_params: &struct iwl_scan_general_params_v11
* @channel_params: &struct iwl_scan_channel_params_v4
* @periodic_params: &struct iwl_scan_periodic_parms_v1
* @probe_params: &struct iwl_scan_probe_params_v3
*/
struct iwl_scan_req_params_v12 {
struct iwl_scan_general_params_v10 general_params;
struct iwl_scan_general_params_v11 general_params;
struct iwl_scan_channel_params_v4 channel_params;
struct iwl_scan_periodic_parms_v1 periodic_params;
struct iwl_scan_probe_params_v3 probe_params;
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
/**
* struct iwl_scan_req_params_v14
* @general_params: &struct iwl_scan_general_params_v10
* struct iwl_scan_req_params_v15
* @general_params: &struct iwl_scan_general_params_v11
* @channel_params: &struct iwl_scan_channel_params_v6
* @periodic_params: &struct iwl_scan_periodic_parms_v1
* @probe_params: &struct iwl_scan_probe_params_v4
*/
struct iwl_scan_req_params_v14 {
struct iwl_scan_general_params_v10 general_params;
struct iwl_scan_req_params_v15 {
struct iwl_scan_general_params_v11 general_params;
struct iwl_scan_channel_params_v6 channel_params;
struct iwl_scan_periodic_parms_v1 periodic_params;
struct iwl_scan_probe_params_v4 probe_params;
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_14 */
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_15 and *_VER_14 */
/**
* struct iwl_scan_req_umac_v12
@ -1033,16 +1080,16 @@ struct iwl_scan_req_umac_v12 {
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
/**
* struct iwl_scan_req_umac_v14
* struct iwl_scan_req_umac_v15
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @scan_params: scan parameters
*/
struct iwl_scan_req_umac_v14 {
struct iwl_scan_req_umac_v15 {
__le32 uid;
__le32 ooc_priority;
struct iwl_scan_req_params_v14 scan_params;
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_14 */
struct iwl_scan_req_params_v15 scan_params;
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_15 and *_VER_14 */
/**
* struct iwl_umac_scan_abort

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018, 2020 Intel Corporation
* Copyright (C) 2012-2014, 2018, 2020 - 2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -432,6 +432,7 @@ enum iwl_fw_statistics_type {
FW_STATISTICS_HE,
}; /* FW_STATISTICS_TYPE_API_E_VER_1 */
#define IWL_STATISTICS_TYPE_MSK 0x7f
/**
* struct iwl_statistics_ntfy_hdr
*
@ -445,11 +446,98 @@ struct iwl_statistics_ntfy_hdr {
__le16 size;
}; /* STATISTICS_NTFY_HDR_API_S_VER_1 */
/**
* struct iwl_statistics_ntfy_per_mac
*
* @beacon_filter_average_energy: Average energy [-dBm] of the 2
* antennas.
* @air_time: air time
* @beacon_counter: all beacons (both filtered and not filtered)
* @beacon_average_energy: all beacons (both filtered and not
* filtered)
* @beacon_rssi_a: beacon RSSI on antenna A
* @beacon_rssi_b: beacon RSSI on antenna B
* @rx_bytes: RX byte count
*/
struct iwl_statistics_ntfy_per_mac {
__le32 beacon_filter_average_energy;
__le32 air_time;
__le32 beacon_counter;
__le32 beacon_average_energy;
__le32 beacon_rssi_a;
__le32 beacon_rssi_b;
__le32 rx_bytes;
} __packed; /* STATISTICS_NTFY_PER_MAC_API_S_VER_1 */
#define IWL_STATS_MAX_BW_INDEX 5
/**
* struct iwl_statistics_ntfy_per_phy
* @channel_load: channel load
* @channel_load_by_us: device contribution to MCLM
* @channel_load_not_by_us: other devices' contribution to MCLM
* @clt: CLT HW timer (TIM_CH_LOAD2)
* @act: active accumulator SW
* @elp: elapsed time accumulator SW
* @rx_detected_per_ch_width: number of RX detections per channel width,
* 0 - 20, 1/2/3 - 40/80/160
* @success_per_ch_width: number of frames that got ACK/BACK/CTS
* per channel BW. note, BACK counted as 1
* @fail_per_ch_width: number of frames that didn't get ACK/BACK/CTS
* per channel BW. note BACK counted as 1
* @last_tx_ch_width_indx: channel width index of the last transmitted frame
*/
struct iwl_statistics_ntfy_per_phy {
__le32 channel_load;
__le32 channel_load_by_us;
__le32 channel_load_not_by_us;
__le32 clt;
__le32 act;
__le32 elp;
__le32 rx_detected_per_ch_width[IWL_STATS_MAX_BW_INDEX];
__le32 success_per_ch_width[IWL_STATS_MAX_BW_INDEX];
__le32 fail_per_ch_width[IWL_STATS_MAX_BW_INDEX];
__le32 last_tx_ch_width_indx;
} __packed; /* STATISTICS_NTFY_PER_PHY_API_S_VER_1 */
/**
* struct iwl_statistics_ntfy_per_sta
*
* @average_energy: in fact it is minus the energy..
*/
struct iwl_statistics_ntfy_per_sta {
__le32 average_energy;
} __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */
#define IWL_STATS_MAX_PHY_OPERTINAL 3
/**
* struct iwl_statistics_operational_ntfy
*
* @hdr: general statistics header
* @flags: bitmap of possible notification structures
* @per_mac_stats: per mac statistics, &struct iwl_statistics_ntfy_per_mac
* @per_phy_stats: per phy statistics, &struct iwl_statistics_ntfy_per_phy
* @per_sta_stats: per sta statistics, &struct iwl_statistics_ntfy_per_sta
* @rx_time: rx time
* @tx_time: usec the radio is transmitting.
* @on_time_rf: The total time in usec the RF is awake.
* @on_time_scan: usec the radio is awake due to scan.
*/
struct iwl_statistics_operational_ntfy {
struct iwl_statistics_ntfy_hdr hdr;
__le32 flags;
struct iwl_statistics_ntfy_per_mac per_mac_stats[MAC_INDEX_AUX];
struct iwl_statistics_ntfy_per_phy per_phy_stats[IWL_STATS_MAX_PHY_OPERTINAL];
struct iwl_statistics_ntfy_per_sta per_sta_stats[IWL_MVM_STATION_COUNT_MAX];
__le64 rx_time;
__le64 tx_time;
__le64 on_time_rf;
__le64 on_time_scan;
} __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_15 */
/**
* struct iwl_statistics_operational_ntfy_ver_14
*
* @hdr: general statistics header
* @flags: bitmap of possible notification structures
* @mac_id: mac on which the beacon was received
* @beacon_filter_average_energy: Average energy [-dBm] of the 2
* antennas.
@ -469,7 +557,7 @@ struct iwl_statistics_ntfy_hdr {
* @average_energy: in fact it is minus the energy..
* @reserved: reserved
*/
struct iwl_statistics_operational_ntfy {
struct iwl_statistics_operational_ntfy_ver_14 {
struct iwl_statistics_ntfy_hdr hdr;
__le32 flags;
__le32 mac_id;

View File

@ -1,11 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2019-2020 Intel Corporation
* Copyright (C) 2012-2014, 2019-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_soc_h__
#define __iwl_fw_api_soc_h__
#ifndef __iwl_fw_api_system_h__
#define __iwl_fw_api_system_h__
#define SOC_CONFIG_CMD_FLAGS_DISCRETE BIT(0)
#define SOC_CONFIG_CMD_FLAGS_LOW_LATENCY BIT(1)
@ -32,4 +32,12 @@ struct iwl_soc_configuration_cmd {
* SOC_CONFIGURATION_CMD_S_VER_2
*/
#endif /* __iwl_fw_api_soc_h__ */
/**
* struct iwl_system_features_control_cmd - system features control command
* @features: bitmap of features to disable
*/
struct iwl_system_features_control_cmd {
__le32 features[4];
} __packed; /* SYSTEM_FEATURES_CONTROL_CMD_API_S_VER_1 */
#endif /* __iwl_fw_api_system_h__ */

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_tx_h__
@ -177,6 +177,17 @@ enum iwl_tx_offload_assist_flags_pos {
#define IWL_TX_CMD_OFFLD_MH_MASK 0x1f
#define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f
enum iwl_tx_offload_assist_bz {
IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS = 0x000003ff,
IWL_TX_CMD_OFFLD_BZ_START_OFFS = 0x001ff800,
IWL_TX_CMD_OFFLD_BZ_MH_LEN = 0x07c00000,
IWL_TX_CMD_OFFLD_BZ_MH_PAD = 0x08000000,
IWL_TX_CMD_OFFLD_BZ_AMSDU = 0x10000000,
IWL_TX_CMD_OFFLD_BZ_ZERO2ONES = 0x20000000,
IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM = 0x40000000,
IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM = 0x80000000,
};
/* TODO: complete documentation for try_cnt and btkill_cnt */
/**
* struct iwl_tx_cmd - TX command struct to FW
@ -243,8 +254,10 @@ struct iwl_tx_cmd {
u8 tid_tspec;
__le16 pm_frame_timeout;
__le16 reserved4;
u8 payload[0];
struct ieee80211_hdr hdr[0];
union {
DECLARE_FLEX_ARRAY(u8, payload);
DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
};
} __packed; /* TX_CMD_API_S_VER_6 */
struct iwl_dram_sec_info {
@ -283,8 +296,7 @@ struct iwl_tx_cmd_gen2 {
* @dram_info: FW internal DRAM storage
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
* @ttl: time to live - packet lifetime limit. The FW should drop if
* passed.
* @reserved: reserved
* @hdr: 802.11 header
*/
struct iwl_tx_cmd_gen3 {
@ -293,7 +305,7 @@ struct iwl_tx_cmd_gen3 {
__le32 offload_assist;
struct iwl_dram_sec_info dram_info;
__le32 rate_n_flags;
__le64 ttl;
u8 reserved[8];
struct ieee80211_hdr hdr[];
} __packed; /* TX_CMD_API_S_VER_8,
TX_CMD_API_S_VER_10 */
@ -720,8 +732,10 @@ struct iwl_mvm_compressed_ba_notif {
__le32 tx_rate;
__le16 tfd_cnt;
__le16 ra_tid_cnt;
struct iwl_mvm_compressed_ba_ratid ra_tid[0];
struct iwl_mvm_compressed_ba_tfd tfd[];
union {
DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_ratid, ra_tid);
DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_tfd, tfd);
};
} __packed; /* COMPRESSED_BA_RES_API_S_VER_4,
COMPRESSED_BA_RES_API_S_VER_5 */
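
DECLARE_FLEX_ARRAY() is needed here because a bare flexible array member is not valid inside a union; the macro wraps it so both views alias the same trailing bytes. A minimal illustration of the pattern (the struct name is made up):

struct example_tail {
	__le16 count;
	__le16 reserved;
	union {
		/* both members overlay the same trailing storage */
		DECLARE_FLEX_ARRAY(u8, raw);
		DECLARE_FLEX_ARRAY(__le32, words);
	};
} __packed;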

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2019-2020 Intel Corporation
* Copyright (C) 2005-2014, 2019-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -76,6 +76,8 @@ enum iwl_tx_queue_cfg_actions {
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
};
#define IWL_DEFAULT_QUEUE_SIZE_EHT (1024 * 4)
#define IWL_DEFAULT_QUEUE_SIZE_HE 1024
#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_MGMT_QUEUE_SIZE 16
#define IWL_CMD_QUEUE_SIZE 32

View File

@ -15,7 +15,7 @@
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "iwl-fh.h"
/**
* struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
*
@ -306,9 +306,6 @@ static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
iwl_trans_release_nic_access(fwrt->trans);
}
#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
struct iwl_prph_range {
u32 start, end;
};
@ -883,7 +880,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
dump_info->hw_type =
cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
dump_info->hw_step =
cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
cpu_to_le32(fwrt->trans->hw_rev_step);
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
sizeof(dump_info->fw_human_readable));
strncpy(dump_info->dev_human_readable, fwrt->trans->name,
@ -1035,7 +1032,7 @@ struct iwl_dump_ini_region_data {
static int
iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1060,7 +1057,7 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1110,7 +1107,7 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1129,7 +1126,7 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
@ -1161,7 +1158,7 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1173,8 +1170,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
le32_to_cpu(reg->dev_addr.size));
if ((le32_to_cpu(reg->id) & IWL_FW_INI_REGION_V2_MASK) ==
IWL_FW_INI_HW_SMEM_REGION_ID &&
if (reg->sub_type == IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM &&
fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf)
fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx,
range->data,
@ -1184,7 +1180,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
}
static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct page *page = fwrt->fw_paging_db[idx].fw_paging_block;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1204,7 +1200,7 @@ static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_error_dump_range *range;
u32 page_size;
@ -1213,7 +1209,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
idx++;
if (!fwrt->trans->trans_cfg->gen2)
return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx);
range = range_ptr;
page_size = fwrt->trans->init_dram.paging[idx].size;
@ -1229,7 +1225,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1248,7 +1244,7 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1316,7 +1312,7 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1451,7 +1447,7 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1518,7 +1514,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_region_err_table *err_table = &reg->err_table;
@ -1537,7 +1533,7 @@ iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_region_special_device_memory *special_mem =
@ -1558,7 +1554,7 @@ iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@ -1570,10 +1566,8 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
return -EBUSY;
range->range_data_size = reg->dev_addr.size;
iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG,
DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK);
for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
prph_data = iwl_read_prph(fwrt->trans, (i % 2) ?
prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
DBGI_SRAM_TARGET_ACCESS_RDATA_LSB);
if (prph_data == 0x5a5a5a5a) {
@ -1588,7 +1582,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt;
@ -1607,10 +1601,37 @@ static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static int iwl_dump_ini_imr_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, u32 range_len, int idx)
{
/* read the IMR memory and DMA it to SRAM */
struct iwl_fw_ini_error_dump_range *range = range_ptr;
u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr;
u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte;
u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr;
u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
u32 size_to_dump = (imr_rem_bytes > sram_size) ? sram_size : imr_rem_bytes;
range->range_data_size = cpu_to_le32(size_to_dump);
if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr,
imr_curr_addr, size_to_dump)) {
IWL_ERR(fwrt, "WRT_DEBUG: IMR Memory transfer failed\n");
return -1;
}
fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump;
fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump;
iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data,
size_to_dump);
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static void *
iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data)
void *data, u32 data_len)
{
struct iwl_fw_ini_error_dump *dump = data;
@ -1686,7 +1707,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data)
void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
@ -1697,7 +1718,7 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data)
void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
@ -1705,10 +1726,21 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
&fwrt->trans->cfg->mon_smem_regs);
}
static void *
iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
&fwrt->trans->cfg->mon_dbgi_regs);
}
static void *
iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data)
void *data, u32 data_len)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_err_table_dump *dump = data;
@ -1722,7 +1754,7 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data)
void *data, u32 data_len)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_special_device_memory *dump = data;
@ -1734,6 +1766,18 @@ iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
return dump->data;
}
static void *
iwl_dump_ini_imr_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data, u32 data_len)
{
struct iwl_fw_ini_error_dump *dump = data;
dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
return dump->data;
}
static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@ -1793,6 +1837,26 @@ static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt,
return 1;
}
static u32 iwl_dump_ini_imr_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
/* range is the total number of pages needed to copy from
* IMR memory to SRAM and later from SRAM to DRAM
*/
u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
IWL_DEBUG_INFO(fwrt,
"WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
imr_enable, imr_size, sram_size);
return 0;
}
return (imr_size % sram_size) ? (imr_size / sram_size + 1) : (imr_size / sram_size);
}
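In other words, a ceiling division: for example, a 5MB IMR drained through a 2MB SRAM window needs three ranges.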
static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@ -1870,6 +1934,20 @@ iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
static u32 iwl_dump_ini_mon_dbgi_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
u32 size = le32_to_cpu(reg->dev_addr.size);
u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data);
if (!size || !ranges)
return 0;
return sizeof(struct iwl_fw_ini_monitor_dump) + ranges *
(size + sizeof(struct iwl_fw_ini_error_dump_range));
}
static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@ -1957,6 +2035,33 @@ iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
static u32
iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
u32 size = 0;
u32 ranges = 0;
u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
IWL_DEBUG_INFO(fwrt,
"WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
imr_enable, imr_size, sram_size);
return size;
}
size = imr_size;
ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data);
if (!size && !ranges) {
IWL_ERR(fwrt, "WRT: imr_size :=%d, ranges :=%d\n", size, ranges);
return 0;
}
size += sizeof(struct iwl_fw_ini_error_dump) +
ranges * sizeof(struct iwl_fw_ini_error_dump_range);
return size;
}
/**
* struct iwl_dump_ini_mem_ops - ini memory dump operations
* @get_num_of_ranges: returns the number of memory ranges in the region.
@ -1973,10 +2078,10 @@ struct iwl_dump_ini_mem_ops {
struct iwl_dump_ini_region_data *reg_data);
void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *data);
void *data, u32 data_len);
int (*fill_range)(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range, int idx);
void *range, u32 range_len, int idx);
};
/**
@ -1996,26 +2101,56 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_dump_entry *entry;
struct iwl_fw_error_dump_data *tlv;
struct iwl_fw_ini_error_dump_data *tlv;
struct iwl_fw_ini_error_dump_header *header;
u32 type = le32_to_cpu(reg->type), id = le32_to_cpu(reg->id);
u32 type = reg->type;
u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK);
u32 num_of_ranges, i, size;
void *range;
u8 *range;
u32 free_size;
u64 header_size;
u32 dump_policy = IWL_FW_INI_DUMP_VERBOSE;
/*
* The higher part of the ID in version 2 is irrelevant for
* us, so mask it out.
*/
if (le32_to_cpu(reg->hdr.version) == 2)
id &= IWL_FW_INI_REGION_V2_MASK;
IWL_DEBUG_FW(fwrt, "WRT: Collecting region: dump type=%d, id=%d, type=%d\n",
dump_policy, id, type);
if (le32_to_cpu(reg->hdr.version) >= 2) {
u32 dp = le32_get_bits(reg->id,
IWL_FW_INI_REGION_DUMP_POLICY_MASK);
if (dump_policy == IWL_FW_INI_DUMP_VERBOSE &&
!(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT)) {
IWL_DEBUG_FW(fwrt,
"WRT: no dump - type %d and policy mismatch=%d\n",
dump_policy, dp);
return 0;
} else if (dump_policy == IWL_FW_INI_DUMP_MEDIUM &&
!(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB)) {
IWL_DEBUG_FW(fwrt,
"WRT: no dump - type %d and policy mismatch=%d\n",
dump_policy, dp);
return 0;
} else if (dump_policy == IWL_FW_INI_DUMP_BRIEF &&
!(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB)) {
IWL_DEBUG_FW(fwrt,
"WRT: no dump - type %d and policy mismatch=%d\n",
dump_policy, dp);
return 0;
}
}
if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
!ops->fill_range)
!ops->fill_range) {
IWL_DEBUG_FW(fwrt, "WRT: no ops for collecting data\n");
return 0;
}
size = ops->get_size(fwrt, reg_data);
if (!size)
if (size < sizeof(*header)) {
IWL_DEBUG_FW(fwrt, "WRT: size didn't include space for header\n");
return 0;
}
entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size);
if (!entry)
@ -2025,11 +2160,11 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
tlv = (void *)entry->data;
tlv->type = reg->type;
tlv->sub_type = reg->sub_type;
tlv->sub_type_ver = reg->sub_type_ver;
tlv->reserved = reg->reserved;
tlv->len = cpu_to_le32(size);
IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id,
type);
num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
header = (void *)tlv->data;
@ -2038,7 +2173,8 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
range = ops->fill_mem_hdr(fwrt, reg_data, header);
free_size = size;
range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size);
if (!range) {
IWL_ERR(fwrt,
"WRT: Failed to fill region header: id=%d, type=%d\n",
@ -2046,8 +2182,26 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
goto out_err;
}
header_size = range - (u8 *)header;
if (WARN(header_size > free_size,
#if defined(__linux__)
"header size %llu > free_size %d",
header_size, free_size)) {
#elif defined(__FreeBSD__)
"header size %ju > free_size %d",
(uintmax_t)header_size, free_size)) {
#endif
IWL_ERR(fwrt,
"WRT: fill_mem_hdr used more than given free_size\n");
goto out_err;
}
free_size -= header_size;
for (i = 0; i < num_of_ranges; i++) {
int range_size = ops->fill_range(fwrt, reg_data, range, i);
int range_size = ops->fill_range(fwrt, reg_data, range,
free_size, i);
if (range_size < 0) {
IWL_ERR(fwrt,
@ -2055,7 +2209,16 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
id, type);
goto out_err;
}
range = (u8 *)range + range_size;
if (WARN(range_size > free_size, "range_size %d > free_size %d",
range_size, free_size)) {
IWL_ERR(fwrt,
"WRT: fill_raged used more than given free_size\n");
goto out_err;
}
free_size -= range_size;
range = range + range_size;
}
list_add_tail(&entry->list, list);
@ -2107,7 +2270,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type);
dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype);
dump->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
dump->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step);
/*
* Several HWs all have type == 0x42, so we'll override this value
@ -2115,7 +2278,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
*/
hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev);
if (hw_type == IWL_AX210_HW_TYPE) {
u32 prph_val = iwl_read_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR_GEN2);
u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR);
u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT);
u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT);
u32 masked_bits = is_jacket | (is_cdb << 1);
@ -2245,7 +2408,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.fill_range = iwl_dump_ini_csr_iter,
},
[IWL_FW_INI_REGION_DRAM_IMR] = {},
[IWL_FW_INI_REGION_DRAM_IMR] = {
.get_num_of_ranges = iwl_dump_ini_imr_ranges,
.get_size = iwl_dump_ini_imr_get_size,
.fill_mem_hdr = iwl_dump_ini_imr_fill_header,
.fill_range = iwl_dump_ini_imr_iter,
},
[IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
@ -2260,8 +2428,8 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
},
[IWL_FW_INI_REGION_DBGI_SRAM] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.get_size = iwl_dump_ini_mon_dbgi_get_size,
.fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header,
.fill_range = iwl_dump_ini_dbgi_sram_iter,
},
};
@ -2299,7 +2467,7 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
}
reg = (void *)reg_data.reg_tlv->data;
reg_type = le32_to_cpu(reg->type);
reg_type = reg->type;
if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops))
continue;
@ -2450,9 +2618,9 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data)
{
#if defined(__linux__)
struct list_head dump_list = LIST_HEAD_INIT(dump_list);
LIST_HEAD(dump_list);
#elif defined(__FreeBSD__)
struct list_head dump_list = LINUX_LIST_HEAD_INIT(dump_list);
LINUX_LIST_HEAD(dump_list);
#endif
struct scatterlist *sg_dump_data;
u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
@ -2598,7 +2766,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC;
}
desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC);
if (!desc)
return -ENOMEM;
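/*
 * For illustration (not part of the imported code): struct_size() is the
 * overflow-checked form of the old open-coded size. With trig_desc.data
 * being a u8 flexible array, the two expressions are equivalent,
 *
 *	struct_size(desc, trig_desc.data, len)
 *	== sizeof(*desc) + len * sizeof(desc->trig_desc.data[0])
 *
 * except that struct_size() saturates to SIZE_MAX on overflow, so the
 * following kzalloc() fails cleanly instead of under-allocating.
 */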
@ -2694,6 +2862,28 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
u32 timepoint,
u32 timepoint_data)
{
struct iwl_dbg_dump_complete_cmd hcmd_data;
struct iwl_host_cmd hcmd = {
.id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD),
.data[0] = &hcmd_data,
.len[0] = sizeof(hcmd_data),
};
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
return;
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT)) {
hcmd_data.tp = cpu_to_le32(timepoint);
hcmd_data.tp_data = cpu_to_le32(timepoint_data);
iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
}
/* this function assumes dump_start was called beforehand and dump_end will be
* called afterwards
*/
@ -2702,7 +2892,8 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
u32 policy;
u32 time_point;
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
@ -2728,6 +2919,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
policy = le32_to_cpu(dump_data->trig->apply_policy);
time_point = le32_to_cpu(dump_data->trig->time_point);
if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
}
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
out:
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
iwl_fw_error_dump_data_free(dump_data);
@ -2801,9 +3002,8 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
/* assumes the op mode mutex is locked in dump_start since
* iwl_fw_dbg_collect_sync can't run in parallel
*/
if (fwrt->ops && fwrt->ops->dump_start &&
fwrt->ops->dump_start(fwrt->ops_ctx))
return;
if (fwrt->ops && fwrt->ops->dump_start)
fwrt->ops->dump_start(fwrt->ops_ctx);
iwl_fw_dbg_collect_sync(fwrt, wks->idx);

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
* Copyright (C) 2005-2014, 2018-2019, 2021-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@ -324,4 +324,7 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
}
void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
u32 timepoint,
u32 timepoint_data);
#endif /* __iwl_fw_dbg_h__ */

View File

@ -0,0 +1,409 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#include "api/commands.h"
#include "debugfs.h"
#include "dbg.h"
#include <linux/seq_file.h>
#define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
struct dbgfs_##name##_data { \
argtype *arg; \
bool read_done; \
ssize_t rlen; \
char rbuf[buflen]; \
}; \
static int _iwl_dbgfs_##name##_open(struct inode *inode, \
struct file *file) \
{ \
struct dbgfs_##name##_data *data; \
\
data = kzalloc(sizeof(*data), GFP_KERNEL); \
if (!data) \
return -ENOMEM; \
\
data->read_done = false; \
data->arg = inode->i_private; \
file->private_data = data; \
\
return 0; \
}
#define FWRT_DEBUGFS_READ_WRAPPER(name) \
static ssize_t _iwl_dbgfs_##name##_read(struct file *file, \
char __user *user_buf, \
size_t count, loff_t *ppos) \
{ \
struct dbgfs_##name##_data *data = file->private_data; \
\
if (!data->read_done) { \
data->read_done = true; \
data->rlen = iwl_dbgfs_##name##_read(data->arg, \
sizeof(data->rbuf),\
data->rbuf); \
} \
\
if (data->rlen < 0) \
return data->rlen; \
return simple_read_from_buffer(user_buf, count, ppos, \
data->rbuf, data->rlen); \
}
static int _iwl_dbgfs_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
#define _FWRT_DEBUGFS_READ_FILE_OPS(name, buflen, argtype) \
FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
FWRT_DEBUGFS_READ_WRAPPER(name) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.read = _iwl_dbgfs_##name##_read, \
.open = _iwl_dbgfs_##name##_open, \
.llseek = generic_file_llseek, \
.release = _iwl_dbgfs_release, \
}
#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
const char __user *user_buf, \
size_t count, loff_t *ppos) \
{ \
argtype *arg = \
((struct dbgfs_##name##_data *)file->private_data)->arg;\
char buf[buflen] = {}; \
size_t buf_size = min(count, sizeof(buf) - 1); \
\
if (copy_from_user(buf, user_buf, buf_size)) \
return -EFAULT; \
\
return iwl_dbgfs_##name##_write(arg, buf, buf_size); \
}
#define _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \
FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
FWRT_DEBUGFS_READ_WRAPPER(name) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = _iwl_dbgfs_##name##_write, \
.read = _iwl_dbgfs_##name##_read, \
.open = _iwl_dbgfs_##name##_open, \
.llseek = generic_file_llseek, \
.release = _iwl_dbgfs_release, \
}
#define _FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \
FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = _iwl_dbgfs_##name##_write, \
.open = _iwl_dbgfs_##name##_open, \
.llseek = generic_file_llseek, \
.release = _iwl_dbgfs_release, \
}
#define FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz) \
_FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
_FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
debugfs_create_file(alias, mode, parent, fwrt, \
&iwl_dbgfs_##name##_ops); \
} while (0)
#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
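/*
 * Illustrative expansion (sketch, not part of the imported code): for the
 * timestamp_marker file declared further below,
 * FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16) roughly yields:
 *
 *	struct dbgfs_timestamp_marker_data {
 *		struct iwl_fw_runtime *arg;
 *		bool read_done;
 *		ssize_t rlen;
 *		char rbuf[16];
 *	};
 *	static int _iwl_dbgfs_timestamp_marker_open(...);
 *	static ssize_t _iwl_dbgfs_timestamp_marker_read(...);
 *	static ssize_t _iwl_dbgfs_timestamp_marker_write(...);
 *	static const struct file_operations iwl_dbgfs_timestamp_marker_ops;
 *
 * so the hand-written iwl_dbgfs_*_read()/_write() handlers only ever see a
 * kernel buffer; all user-space copying lives in the generated wrappers.
 */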
static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt)
{
struct iwl_mvm_marker marker = {
.dw_len = sizeof(struct iwl_mvm_marker) / 4,
.marker_id = MARKER_ID_SYNC_CLOCK,
/* the real timestamp is taken from the ftrace clock
* this is for finding the match between fw and kernel logs
*/
.timestamp = cpu_to_le64(fwrt->timestamp.seq++),
};
struct iwl_host_cmd hcmd = {
.id = MARKER_CMD,
.flags = CMD_ASYNC,
.data[0] = &marker,
.len[0] = sizeof(marker),
};
return iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
char *buf, size_t count)
{
struct iwl_dbg_host_event_cfg_cmd event_cfg;
struct iwl_host_cmd hcmd = {
.id = WIDE_ID(DEBUG_GROUP, HOST_EVENT_CFG),
.flags = CMD_ASYNC,
.data[0] = &event_cfg,
.len[0] = sizeof(event_cfg),
};
u32 enabled_severities;
int ret = kstrtou32(buf, 10, &enabled_severities);
if (ret < 0)
return ret;
event_cfg.enabled_severities = cpu_to_le32(enabled_severities);
ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
IWL_INFO(fwrt,
"sent host event cfg with enabled_severities: %u, ret: %d\n",
enabled_severities, ret);
return ret ?: count;
}
FWRT_DEBUGFS_WRITE_FILE_OPS(enabled_severities, 16);
static void iwl_fw_timestamp_marker_wk(struct work_struct *work)
{
int ret;
struct iwl_fw_runtime *fwrt =
container_of(work, struct iwl_fw_runtime, timestamp.wk.work);
unsigned long delay = fwrt->timestamp.delay;
ret = iwl_fw_send_timestamp_marker_cmd(fwrt);
if (!ret && delay)
schedule_delayed_work(&fwrt->timestamp.wk,
round_jiffies_relative(delay));
else
IWL_INFO(fwrt,
"stopping timestamp_marker, ret: %d, delay: %u\n",
ret, jiffies_to_msecs(delay) / 1000);
}
void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay)
{
IWL_INFO(fwrt,
"starting timestamp_marker trigger with delay: %us\n",
delay);
iwl_fw_cancel_timestamp(fwrt);
fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000);
schedule_delayed_work(&fwrt->timestamp.wk,
round_jiffies_relative(fwrt->timestamp.delay));
}
static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
char *buf, size_t count)
{
int ret;
u32 delay;
ret = kstrtou32(buf, 10, &delay);
if (ret < 0)
return ret;
iwl_fw_trigger_timestamp(fwrt, delay);
return count;
}
static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime *fwrt,
size_t size, char *buf)
{
u32 delay_secs = jiffies_to_msecs(fwrt->timestamp.delay) / 1000;
return scnprintf(buf, size, "%d\n", delay_secs);
}
FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16);
struct hcmd_write_data {
__be32 cmd_id;
__be32 flags;
__be16 length;
u8 data[];
} __packed;
static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
size_t count)
{
size_t header_size = (sizeof(u32) * 2 + sizeof(u16)) * 2;
size_t data_size = (count - 1) / 2;
int ret;
struct hcmd_write_data *data;
struct iwl_host_cmd hcmd = {
.len = { 0, },
.data = { NULL, },
};
if (fwrt->ops && fwrt->ops->fw_running &&
!fwrt->ops->fw_running(fwrt->ops_ctx))
return -EIO;
if (count < header_size + 1 || count > 1024 * 4)
return -EINVAL;
data = kmalloc(data_size, GFP_KERNEL);
if (!data)
return -ENOMEM;
ret = hex2bin((u8 *)data, buf, data_size);
if (ret)
goto out;
hcmd.id = be32_to_cpu(data->cmd_id);
hcmd.flags = be32_to_cpu(data->flags);
hcmd.len[0] = be16_to_cpu(data->length);
hcmd.data[0] = data->data;
if (count != header_size + hcmd.len[0] * 2 + 1) {
IWL_ERR(fwrt,
"host command data size does not match header length\n");
ret = -EINVAL;
goto out;
}
if (fwrt->ops && fwrt->ops->send_hcmd)
ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
else
ret = -EPERM;
if (ret < 0)
goto out;
if (hcmd.flags & CMD_WANT_SKB)
iwl_free_resp(&hcmd);
out:
kfree(data);
return ret ?: count;
}
FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
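/*
 * Wire format, derived from the handler above: the input is ASCII hex with
 * no separators - 8 chars of big-endian command id, 8 of flags, 4 of
 * payload length, then 2 per payload byte - and count must equal
 * header_size + 2 * length + 1 (the trailing newline). A hypothetical
 * 4-byte payload sent to command id 0x00000001 would look like:
 *
 *	echo 00000001000000000004deadbeef > send_hcmd
 */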
static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
size_t size, char *buf)
{
return scnprintf(buf, size, "0x%08x\n",
fwrt->trans->dbg.domains_bitmap);
}
FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20);
struct iwl_dbgfs_fw_info_priv {
struct iwl_fw_runtime *fwrt;
};
struct iwl_dbgfs_fw_info_state {
loff_t pos;
};
static void *iwl_dbgfs_fw_info_seq_next(struct seq_file *seq,
void *v, loff_t *pos)
{
struct iwl_dbgfs_fw_info_state *state = v;
struct iwl_dbgfs_fw_info_priv *priv = seq->private;
const struct iwl_fw *fw = priv->fwrt->fw;
*pos = ++state->pos;
if (*pos >= fw->ucode_capa.n_cmd_versions)
return NULL;
return state;
}
static void iwl_dbgfs_fw_info_seq_stop(struct seq_file *seq,
void *v)
{
kfree(v);
}
static void *iwl_dbgfs_fw_info_seq_start(struct seq_file *seq, loff_t *pos)
{
struct iwl_dbgfs_fw_info_priv *priv = seq->private;
const struct iwl_fw *fw = priv->fwrt->fw;
struct iwl_dbgfs_fw_info_state *state;
if (*pos >= fw->ucode_capa.n_cmd_versions)
return NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
state->pos = *pos;
return state;
}
static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v)
{
struct iwl_dbgfs_fw_info_state *state = v;
struct iwl_dbgfs_fw_info_priv *priv = seq->private;
const struct iwl_fw *fw = priv->fwrt->fw;
const struct iwl_fw_cmd_version *ver;
u32 cmd_id;
if (!state->pos)
seq_puts(seq, "fw_api_ver:\n");
ver = &fw->ucode_capa.cmd_versions[state->pos];
cmd_id = WIDE_ID(ver->group, ver->cmd);
seq_printf(seq, " 0x%04x:\n", cmd_id);
seq_printf(seq, " name: %s\n",
iwl_get_cmd_string(priv->fwrt->trans, cmd_id));
seq_printf(seq, " cmd_ver: %d\n", ver->cmd_ver);
seq_printf(seq, " notif_ver: %d\n", ver->notif_ver);
return 0;
}
static const struct seq_operations iwl_dbgfs_info_seq_ops = {
.start = iwl_dbgfs_fw_info_seq_start,
.next = iwl_dbgfs_fw_info_seq_next,
.stop = iwl_dbgfs_fw_info_seq_stop,
.show = iwl_dbgfs_fw_info_seq_show,
};
static int iwl_dbgfs_fw_info_open(struct inode *inode, struct file *filp)
{
struct iwl_dbgfs_fw_info_priv *priv;
priv = __seq_open_private(filp, &iwl_dbgfs_info_seq_ops,
sizeof(*priv));
if (!priv)
return -ENOMEM;
priv->fwrt = inode->i_private;
return 0;
}
static const struct file_operations iwl_dbgfs_fw_info_ops = {
.owner = THIS_MODULE,
.open = iwl_dbgfs_fw_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
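/*
 * Sample output (values invented for illustration): reading fw_info walks
 * the cmd_versions array via the seq_ops above and prints e.g.:
 *
 *	fw_api_ver:
 *	 0x0107:
 *	  name: SCAN_CFG_CMD
 *	  cmd_ver: 2
 *	  notif_ver: 0
 */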
void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(fw_info, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(enabled_severities, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400);
}

View File

@ -12,6 +12,7 @@
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "pnvm.h"
/*
* Note: This structure is read from the device with IO accesses,
@ -19,53 +20,6 @@
* and the reading already does the endian conversion. As it is
* read with u32-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
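/*
 * Sketch, for illustration: such a table is pulled out of device memory
 * in one shot, e.g.
 *
 *	struct iwl_error_event_table table = {};
 *
 *	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
 *
 * and because the u32-sized target accesses already return CPU-endian
 * values, u32 members need no le32_to_cpu(); narrower members would land
 * byte-swapped on big-endian hosts, hence the all-u32 layout.
 */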
struct iwl_error_event_table_v1 {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
u32 pc; /* program counter */
u32 blink1; /* branch link */
u32 blink2; /* branch link */
u32 ilink1; /* interrupt link */
u32 ilink2; /* interrupt link */
u32 data1; /* error-specific data */
u32 data2; /* error-specific data */
u32 data3; /* error-specific data */
u32 bcon_time; /* beacon timer */
u32 tsf_low; /* network timestamp function timer */
u32 tsf_hi; /* network timestamp function timer */
u32 gp1; /* GP1 timer register */
u32 gp2; /* GP2 timer register */
u32 gp3; /* GP3 timer register */
u32 ucode_ver; /* uCode version */
u32 hw_ver; /* HW Silicon version */
u32 brd_ver; /* HW board version */
u32 log_pc; /* log program counter */
u32 frame_ptr; /* frame pointer */
u32 stack_ptr; /* stack pointer */
u32 hcmd; /* last host command header */
u32 isr0; /* isr status register LMPM_NIC_ISR0:
* rxtx_flag */
u32 isr1; /* isr status register LMPM_NIC_ISR1:
* host_flag */
u32 isr2; /* isr status register LMPM_NIC_ISR2:
* enc_flag */
u32 isr3; /* isr status register LMPM_NIC_ISR3:
* time_flag */
u32 isr4; /* isr status register LMPM_NIC_ISR4:
* wico interrupt */
u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
u32 wait_event; /* wait event() caller address */
u32 l2p_control; /* L2pControlField */
u32 l2p_duration; /* L2pDurationField */
u32 l2p_mhvalid; /* L2pMhValidBits */
u32 l2p_addr_match; /* L2pAddrMatchStat */
u32 lmpm_pmg_sel; /* indicate which clocks are turned on
* (LMPM_PMG_SEL) */
u32 u_timestamp; /* indicate when the date and time of the
* compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
struct iwl_error_event_table {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
@ -147,6 +101,7 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
struct iwl_trans *trans = fwrt->trans;
struct iwl_umac_error_event_table table = {};
u32 base = fwrt->trans->dbg.umac_error_event_table;
char pnvm_name[MAX_PNVM_NAME];
if (!base &&
!(fwrt->trans->dbg.error_event_table_tlv_status &
@ -164,6 +119,13 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
fwrt->trans->status, table.valid);
}
if ((table.error_id & ~FW_SYSASSERT_CPU_MASK) ==
FW_SYSASSERT_PNVM_MISSING) {
iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));
IWL_ERR(fwrt, "PNVM data is missing, please install %s\n",
pnvm_name);
}
IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,
iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);
@ -212,7 +174,9 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu
IWL_ERR(trans, "HW error, resetting before reading\n");
/* reset the device */
iwl_trans_sw_reset(trans);
err = iwl_trans_sw_reset(trans, true);
if (err)
return;
err = iwl_finish_nic_init(trans);
if (err)
@ -295,21 +259,21 @@ struct iwl_tcm_error_event_table {
u32 reserved[4];
} __packed; /* TCM_LOG_ERROR_TABLE_API_S_VER_1 */
static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_tcm_error_event_table table = {};
u32 base = fwrt->trans->dbg.tcm_error_event_table;
u32 base = fwrt->trans->dbg.tcm_error_event_table[idx];
int i;
u32 flag = idx ? IWL_ERROR_EVENT_TABLE_TCM2 :
IWL_ERROR_EVENT_TABLE_TCM1;
if (!base ||
!(fwrt->trans->dbg.error_event_table_tlv_status &
IWL_ERROR_EVENT_TABLE_TCM))
if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag))
return;
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
IWL_ERR(fwrt, "TCM status:\n");
IWL_ERR(fwrt, "TCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1);
@ -328,13 +292,72 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n",
table.sw_status[i], i);
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
u32 scratch = iwl_read32(trans, CSR_FUNC_SCRATCH);
/*
* RCM error struct.
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
* read with u32-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
struct iwl_rcm_error_event_table {
u32 valid;
u32 error_id;
u32 blink2;
u32 ilink1;
u32 ilink2;
u32 data1, data2, data3;
u32 logpc;
u32 frame_pointer;
u32 stack_pointer;
u32 msgid;
u32 isr;
u32 frame_hw_status;
u32 mbx_lmac_to_rcm_req;
u32 mbx_rcm_to_lmac_req;
u32 mh_ctl;
u32 mh_addr1_lo;
u32 mh_info;
u32 mh_err;
u32 reserved[3];
} __packed; /* RCM_LOG_ERROR_TABLE_API_S_VER_1 */
IWL_ERR(fwrt, "Function Scratch status:\n");
IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch);
}
static void iwl_fwrt_dump_rcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_rcm_error_event_table table = {};
u32 base = fwrt->trans->dbg.rcm_error_event_table[idx];
u32 flag = idx ? IWL_ERROR_EVENT_TABLE_RCM2 :
IWL_ERROR_EVENT_TABLE_RCM1;
if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag))
return;
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
IWL_ERR(fwrt, "RCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2);
IWL_ERR(fwrt, "0x%08X | rcm interruptlink1\n", table.ilink1);
IWL_ERR(fwrt, "0x%08X | rcm interruptlink2\n", table.ilink2);
IWL_ERR(fwrt, "0x%08X | rcm data1\n", table.data1);
IWL_ERR(fwrt, "0x%08X | rcm data2\n", table.data2);
IWL_ERR(fwrt, "0x%08X | rcm data3\n", table.data3);
IWL_ERR(fwrt, "0x%08X | rcm log PC\n", table.logpc);
IWL_ERR(fwrt, "0x%08X | rcm frame pointer\n", table.frame_pointer);
IWL_ERR(fwrt, "0x%08X | rcm stack pointer\n", table.stack_pointer);
IWL_ERR(fwrt, "0x%08X | rcm msg ID\n", table.msgid);
IWL_ERR(fwrt, "0x%08X | rcm ISR status\n", table.isr);
IWL_ERR(fwrt, "0x%08X | frame HW status\n", table.frame_hw_status);
IWL_ERR(fwrt, "0x%08X | LMAC-to-RCM request mbox\n",
table.mbx_lmac_to_rcm_req);
IWL_ERR(fwrt, "0x%08X | RCM-to-LMAC request mbox\n",
table.mbx_rcm_to_lmac_req);
IWL_ERR(fwrt, "0x%08X | MAC header control\n", table.mh_ctl);
IWL_ERR(fwrt, "0x%08X | MAC header addr1 low\n", table.mh_addr1_lo);
IWL_ERR(fwrt, "0x%08X | MAC header info\n", table.mh_info);
IWL_ERR(fwrt, "0x%08X | MAC header error\n", table.mh_err);
}
static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
@ -418,8 +441,18 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
if (fwrt->trans->dbg.lmac_error_event_table[1])
iwl_fwrt_dump_lmac_error_log(fwrt, 1);
iwl_fwrt_dump_umac_error_log(fwrt);
iwl_fwrt_dump_tcm_error_log(fwrt);
iwl_fwrt_dump_tcm_error_log(fwrt, 0);
iwl_fwrt_dump_rcm_error_log(fwrt, 0);
iwl_fwrt_dump_tcm_error_log(fwrt, 1);
iwl_fwrt_dump_rcm_error_log(fwrt, 1);
iwl_fwrt_dump_iml_error_log(fwrt);
iwl_fwrt_dump_fseq_regs(fwrt);
if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
u32 scratch = iwl_read32(fwrt->trans, CSR_FUNC_SCRATCH);
IWL_ERR(fwrt, "Function Scratch status:\n");
IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch);
}
}
IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs);

View File

@ -231,6 +231,24 @@ struct iwl_fw_error_dump_mem {
/* Use bit 31 as dump info type to avoid colliding with region types */
#define IWL_INI_DUMP_INFO_TYPE BIT(31)
/**
* struct iwl_fw_ini_error_dump_data - data for one type
* @type: &enum iwl_fw_ini_region_type
* @sub_type: sub type id
* @sub_type_ver: sub type version
* @reserved: not in use
* @len: the length starting from %data
* @data: the data itself
*/
struct iwl_fw_ini_error_dump_data {
u8 type;
u8 sub_type;
u8 sub_type_ver;
u8 reserved;
__le32 len;
__u8 data[];
} __packed;
/**
* struct iwl_fw_ini_dump_entry
* @list: list of dump entries

View File

@ -98,7 +98,6 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_PNVM_VERSION = 62,
IWL_UCODE_TLV_PNVM_SKU = 64,
IWL_UCODE_TLV_TCM_DEBUG_ADDRS = 65,
IWL_UCODE_TLV_SEC_TABLE_ADDR = 66,
IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67,
@ -120,7 +119,7 @@ enum iwl_ucode_tlv_type {
struct iwl_ucode_tlv {
__le32 type; /* see above */
__le32 length; /* not including type/length fields */
u8 data[0];
u8 data[];
};
#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
@ -313,7 +312,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
* @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
* @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it
@ -371,6 +369,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* reset flow
* @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC
* channels even when these are not enabled.
* @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
* complete to FW.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@ -389,7 +389,6 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26,
@ -422,6 +421,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60,
IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO = (__force iwl_ucode_tlv_capa_t)61,
IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)62,
IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT = (__force iwl_ucode_tlv_capa_t)63,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
@ -456,6 +456,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105,
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */

View File

@ -1,14 +1,17 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2019 - 2020 Intel Corporation
* Copyright(c) 2019 - 2021 Intel Corporation
*/
#include <fw/api/commands.h>
#include "img.h"
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
/* prior to LONG_GROUP, we never used this CMD version API */
u8 grp = iwl_cmd_groupid(cmd_id) ?: LONG_GROUP;
u8 cmd = iwl_cmd_opcode(cmd_id);
if (!fw->ucode_capa.cmd_versions ||
!fw->ucode_capa.n_cmd_versions)
@ -49,10 +52,9 @@ u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
}
EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver);
#define FW_SYSASSERT_CPU_MASK 0xf0000000
static const struct {
const char *name;
u8 num;
u32 num;
} advanced_lookup[] = {
{ "NMI_INTERRUPT_WDG", 0x34 },
{ "SYSASSERT", 0x35 },
@ -73,6 +75,7 @@ static const struct {
{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
{ "PNVM_MISSING", FW_SYSASSERT_PNVM_MISSING },
{ "ADVANCED_SYSASSERT", 0 },
};

View File

@ -275,8 +275,12 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
const char *iwl_fw_lookup_assert_desc(u32 num);
#define FW_SYSASSERT_CPU_MASK 0xf0000000
#define FW_SYSASSERT_PNVM_MISSING 0x0010070d
#endif /* __iwl_fw_img_h__ */

View File

@ -8,7 +8,7 @@
#include "dbg.h"
#include "debugfs.h"
#include "fw/api/soc.h"
#include "fw/api/system.h"
#include "fw/api/commands.h"
#include "fw/api/rx.h"
#include "fw/api/datapath.h"
@ -58,7 +58,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
{
struct iwl_soc_configuration_cmd cmd = {};
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0),
.id = WIDE_ID(SYSTEM_GROUP, SOC_CONFIGURATION_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
};
@ -87,8 +87,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
SOC_FLAGS_LTR_APPLY_DELAY_MASK);
if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC,
if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
fwrt->trans->trans_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
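/*
 * Note on these conversions: WIDE_ID(grp, opcode) expands to
 * ((grp) << 8) | (opcode), which is exactly what the retired
 * iwl_cmd_id(opcode, grp, 0) returned (its version argument was ignored),
 * so the replacements throughout this import are mechanical.
 */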

View File

@ -243,7 +243,7 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
.block_num = cpu_to_le32(fwrt->num_of_paging_blk),
};
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, FW_PAGING_BLOCK_CMD),
.len = { sizeof(paging_cmd), },
.data = { &paging_cmd, },
};

View File

@ -16,7 +16,7 @@
#include "fw/acpi.h"
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
void (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
bool (*fw_running)(void *ctx);
int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
@ -156,8 +156,13 @@ struct iwl_fw_runtime {
u8 sar_chain_b_profile;
struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3];
u32 geo_rev;
union iwl_ppag_table_cmd ppag_table;
u32 geo_num_profiles;
bool geo_enabled;
struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
u32 ppag_flags;
u32 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
#endif
};

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -89,7 +89,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
cmd.id = WIDE_ID(SYSTEM_GROUP, SHARED_MEM_CFG_CMD);
else
cmd.id = SHARED_MEM_CFG;

View File

@ -11,6 +11,7 @@
#include "fw/uefi.h"
#include "fw/api/alive.h"
#include <linux/efi.h>
#include "fw/runtime.h"
#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
0xb2, 0xec, 0xf5, 0xa3, \
@ -68,7 +69,7 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
const u8 *data, size_t len)
{
struct iwl_ucode_tlv *tlv;
const struct iwl_ucode_tlv *tlv;
u8 *reduce_power_data = NULL, *tmp;
u32 size = 0;
@ -78,7 +79,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
tlv = (void *)data;
tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@ -86,6 +87,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
if (len < tlv_len) {
IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
len, tlv_len);
kfree(reduce_power_data);
reduce_power_data = ERR_PTR(-EINVAL);
goto out;
}
@ -105,6 +107,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
IWL_DEBUG_FW(trans,
"Couldn't allocate (more) reduce_power_data\n");
kfree(reduce_power_data);
reduce_power_data = ERR_PTR(-ENOMEM);
goto out;
}
@ -134,6 +137,10 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
done:
if (!size) {
IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
/* Better safe than sorry, but 'reduce_power_data' should
* always be NULL if !size.
*/
kfree(reduce_power_data);
reduce_power_data = ERR_PTR(-ENOENT);
goto out;
}
@ -147,7 +154,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len)
{
struct iwl_ucode_tlv *tlv;
const struct iwl_ucode_tlv *tlv;
void *sec_data;
IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
@ -156,7 +163,7 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
tlv = (void *)data;
tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@ -168,8 +175,8 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
struct iwl_sku_id *sku_id =
(void *)(data + sizeof(*tlv));
const struct iwl_sku_id *sku_id =
(const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
@ -260,3 +267,90 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
return data;
}
#ifdef CONFIG_ACPI
static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
struct iwl_fw_runtime *fwrt)
{
int i, j;
if (sgom_data->revision != 1)
return -EINVAL;
memcpy(fwrt->sgom_table.offset_map, sgom_data->offset_map,
sizeof(fwrt->sgom_table.offset_map));
for (i = 0; i < MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE; i++) {
for (j = 0; j < MCC_TO_SAR_OFFSET_TABLE_COL_SIZE; j++) {
/* since each byte is composed of two values, */
/* one for each letter, */
/* extract and check each of them separately */
u8 value = fwrt->sgom_table.offset_map[i][j];
u8 low = value & 0xF;
u8 high = (value & 0xF0) >> 4;
if (high > fwrt->geo_num_profiles)
high = 0;
if (low > fwrt->geo_num_profiles)
low = 0;
fwrt->sgom_table.offset_map[i][j] = (high << 4) | low;
}
}
fwrt->sgom_enabled = true;
return 0;
}
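/*
 * Worked example, for illustration: an offset_map byte of 0x32 carries geo
 * profile 2 in the low nibble and profile 3 in the high nibble, one per
 * MCC letter; any nibble above fwrt->geo_num_profiles is reset to 0 by the
 * loop above.
 */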
void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
struct efivar_entry *sgom_efivar;
struct uefi_cnv_wlan_sgom_data *data;
unsigned long package_size;
int err, ret;
if (!fwrt->geo_enabled)
return;
sgom_efivar = kzalloc(sizeof(*sgom_efivar), GFP_KERNEL);
if (!sgom_efivar)
return;
memcpy(&sgom_efivar->var.VariableName, IWL_UEFI_SGOM_NAME,
sizeof(IWL_UEFI_SGOM_NAME));
sgom_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
/* TODO: we hardcode a maximum length here, because reading
* from the UEFI is not working. To implement this properly,
* we have to call efivar_entry_size().
*/
package_size = IWL_HARDCODED_SGOM_SIZE;
data = kmalloc(package_size, GFP_KERNEL);
if (!data) {
data = ERR_PTR(-ENOMEM);
goto out;
}
err = efivar_entry_get(sgom_efivar, NULL, &package_size, data);
if (err) {
IWL_DEBUG_FW(trans,
"SGOM UEFI variable not found %d\n", err);
goto out_free;
}
IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n",
package_size);
ret = iwl_uefi_sgom_parse(data, fwrt);
if (ret < 0)
IWL_DEBUG_FW(trans, "Cannot read SGOM tables. rev is invalid\n");
out_free:
kfree(data);
out:
kfree(sgom_efivar);
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table);
#endif /* CONFIG_ACPI */

View File

@ -7,6 +7,7 @@
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
#define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping"
/*
* TODO: we have these hardcoded values that the caller must pass,
@ -16,6 +17,7 @@
*/
#define IWL_HARDCODED_PNVM_SIZE 4096
#define IWL_HARDCODED_REDUCE_POWER_SIZE 32768
#define IWL_HARDCODED_SGOM_SIZE 339
struct pnvm_sku_package {
u8 rev;
@ -25,6 +27,16 @@ struct pnvm_sku_package {
u8 data[];
} __packed;
struct uefi_cnv_wlan_sgom_data {
u8 revision;
u8 offset_map[IWL_HARDCODED_SGOM_SIZE - 1];
} __packed;
/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
* disable the feature in old kernels.
*/
#ifdef CONFIG_EFI
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
@ -42,4 +54,12 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
}
#endif /* CONFIG_EFI */
#if defined(CONFIG_EFI) && defined(CONFIG_ACPI)
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
#else
static inline
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
{
}
#endif
#endif /* __iwl_fw_uefi__ */

View File

@ -84,6 +84,10 @@ enum iwl_nvm_type {
#define IWL_DEFAULT_MAX_TX_POWER 22
#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
NETIF_F_TSO | NETIF_F_TSO6)
#define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6)
#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \
IWL_TX_CSUM_NETIF_FLAGS_BZ | \
NETIF_F_RXCSUM)
/* Antenna presence definitions */
#define ANT_NONE 0x0
@ -339,8 +343,8 @@ struct iwl_fw_mon_regs {
* @bisr_workaround: BISR hardware workaround (for 22260 series devices)
* @min_txq_size: minimum number of slots required in a TX queue
* @uhb_supported: ultra high band channels supported
* @min_256_ba_txq_size: minimum number of slots required in a TX queue which
* supports 256 BA aggregation
* @min_ba_txq_size: minimum number of slots required in a TX queue,
* based on hardware support (HE - 256, EHT - 1K).
* @num_rbds: number of receive buffer descriptors to use
* (only used for multi-queue capable devices)
* @mac_addr_csr_base: CSR base register for MAC address access, if not set
@ -401,9 +405,10 @@ struct iwl_cfg {
u32 d3_debug_data_length;
u32 min_txq_size;
u32 gp2_reg_addr;
u32 min_256_ba_txq_size;
u32 min_ba_txq_size;
const struct iwl_fw_mon_regs mon_dram_regs;
const struct iwl_fw_mon_regs mon_smem_regs;
const struct iwl_fw_mon_regs mon_dbgi_regs;
};
#define IWL_CFG_ANY (~0)
@ -429,6 +434,7 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_TYPE_GF 0x10D
#define IWL_CFG_RF_TYPE_MR 0x110
#define IWL_CFG_RF_TYPE_MS 0x111
#define IWL_CFG_RF_TYPE_FM 0x112
#define IWL_CFG_RF_ID_TH 0x1
@ -448,6 +454,9 @@ struct iwl_cfg {
#define IWL_CFG_NO_CDB 0x0
#define IWL_CFG_CDB 0x1
#define IWL_CFG_NO_JACKET 0x0
#define IWL_CFG_IS_JACKET 0x1
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
@ -462,6 +471,7 @@ struct iwl_dev_info {
u8 no_160;
u8 cores;
u8 cdb;
u8 jacket;
const struct iwl_cfg *cfg;
const char *name;
};
@ -501,6 +511,7 @@ extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
extern const char iwl_ax203_name[];
extern const char iwl_ax204_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
@ -610,7 +621,6 @@ extern const struct iwl_cfg killer1650x_2ax_cfg;
extern const struct iwl_cfg killer1650w_2ax_cfg;
extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
@ -624,9 +634,12 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0;
@ -634,6 +647,12 @@ extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_bz_z0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */

View File

@ -105,9 +105,14 @@
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
/* Doorbell NMI (since Bz) */
#define CSR_IPC_SLEEP_CONTROL (CSR_BASE + 0x114)
#define CSR_IPC_SLEEP_CONTROL_SUSPEND 0x3
#define CSR_IPC_SLEEP_CONTROL_RESUME 0
/* Doorbell - since Bz
* connected to UREG_DOORBELL_TO_ISR6 (lower 16 bits only)
*/
#define CSR_DOORBELL_VECTOR (CSR_BASE + 0x130)
#define CSR_DOORBELL_VECTOR_NMI BIT(1)
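/*
 * Assumed usage on Bz-family devices (sketch): forcing an NMI is expected
 * to reduce to a single doorbell write,
 *
 *	iwl_write32(trans, CSR_DOORBELL_VECTOR, CSR_DOORBELL_VECTOR_NMI);
 *
 * with the lower 16 bits of the vector forwarded to UREG_DOORBELL_TO_ISR6.
 */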
/* host chicken bits */
#define CSR_HOST_CHICKEN (CSR_BASE + 0x204)
@ -143,8 +148,7 @@
#define CSR_FUNC_SCRATCH_INIT_VALUE (0x01010101)
/* Bits for CSR_HW_IF_CONFIG_REG */
#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH (0x0000000F)
#define CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM (0x00000080)
#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
@ -287,8 +291,7 @@
#define CSR_GP_CNTRL_REG_FLAG_SW_RESET BIT(31)
/* HW REV */
#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
#define CSR_HW_REV_STEP_DASH(_val) ((_val) & CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH)
#define CSR_HW_REV_TYPE(_val) (((_val) & 0x000FFF0) >> 4)
/* HW RFID */
@ -306,6 +309,7 @@ enum {
SILICON_A_STEP = 0,
SILICON_B_STEP,
SILICON_C_STEP,
SILICON_Z_STEP = 0xf,
};
@ -328,10 +332,10 @@ enum {
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
#define CSR_HW_REV_TYPE_QU_B0 (0x0000334)
#define CSR_HW_REV_TYPE_QU_C0 (0x0000338)
#define CSR_HW_REV_TYPE_QUZ (0x0000354)
#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000361)
#define CSR_HW_REV_TYPE_QU_B0 (0x0000331)
#define CSR_HW_REV_TYPE_QU_C0 (0x0000332)
#define CSR_HW_REV_TYPE_QUZ (0x0000351)
#define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
#define CSR_HW_REV_TYPE_SO (0x0000370)
#define CSR_HW_REV_TYPE_TY (0x0000420)
@ -529,6 +533,9 @@ enum {
* 11-8: queue selector
*/
#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
/* This register is common for Tx and Rx, Rx queues start from 512 */
#define HBUS_TARG_WRPTR_Q_SHIFT (16)
#define HBUS_TARG_WRPTR_RX_Q(q) (((q) + 512) << HBUS_TARG_WRPTR_Q_SHIFT)
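/*
 * Illustrative use (assumed, based on the comment above): updating the
 * write pointer of Rx queue q on multi-queue hardware ORs the queue
 * selector into the index, e.g.
 *
 *	iwl_write32(trans, HBUS_TARG_WRPTR,
 *		    rxq->write_actual | HBUS_TARG_WRPTR_RX_Q(rxq->id));
 */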
/**********************************************************
* CSR values

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2018-2021 Intel Corporation
* Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@ -59,7 +59,7 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 2,},
[IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 3,},
[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,},
};
@ -74,7 +74,8 @@ static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
if (!node)
return -ENOMEM;
memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
memcpy(&node->tlv, tlv, sizeof(node->tlv));
memcpy(node->tlv.data, tlv->data, len);
list_add_tail(&node->list, list);
return 0;
@ -177,15 +178,15 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data;
struct iwl_ucode_tlv **active_reg;
u32 id = le32_to_cpu(reg->id);
u32 type = le32_to_cpu(reg->type);
u8 type = reg->type;
u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
/*
* The higher part of the ID in version 2 is irrelevant for
* us, so mask it out.
* From version 2 on, the higher part of the ID carries the debug policy;
* the region id itself is only the 16 LSBs, so mask the rest out.
*/
if (le32_to_cpu(reg->hdr.version) == 2)
id &= IWL_FW_INI_REGION_V2_MASK;
if (le32_to_cpu(reg->hdr.version) >= 2)
id &= IWL_FW_INI_REGION_ID_MASK;
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
@ -211,6 +212,14 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
return -EOPNOTSUPP;
}
if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
trans->dbg.imr_data.sram_addr =
le32_to_cpu(reg->internal_buffer.base_addr);
trans->dbg.imr_data.sram_size =
le32_to_cpu(reg->internal_buffer.size);
}
active_reg = &trans->dbg.active_regions[id];
if (*active_reg) {
IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
@ -233,6 +242,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
struct iwl_fw_ini_trigger_tlv *dup_trig;
u32 tp = le32_to_cpu(trig->time_point);
u32 rf = le32_to_cpu(trig->reset_fw);
struct iwl_ucode_tlv *dup = NULL;
int ret;
@ -247,6 +257,10 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
return -EINVAL;
}
IWL_DEBUG_FW(trans,
"WRT: time point %u for trigger TLV with reset_fw %u\n",
tp, rf);
trans->dbg.last_tp_resetfw = 0xFF;
if (!le32_to_cpu(trig->occurrences)) {
dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
GFP_KERNEL);
@ -300,14 +314,21 @@ static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
bool ext)
{
const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
u32 domain = le32_to_cpu(hdr->domain);
enum iwl_ini_cfg_state *cfg_state = ext ?
&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
u32 type;
u32 tlv_idx;
u32 domain;
int ret;
if (le32_to_cpu(tlv->length) < sizeof(*hdr))
return;
type = le32_to_cpu(tlv->type);
tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
domain = le32_to_cpu(hdr->domain);
if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
!(domain & trans->dbg.domains_bitmap)) {
IWL_DEBUG_FW(trans,
@ -473,7 +494,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
int res;
if (!iwlwifi_mod_params.enable_ini ||
trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000)
return;
res = firmware_request_nowarn(&fw, yoyo_bin, dev);
@ -565,8 +586,7 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
return 0;
num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
if (!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
return -EIO;
num_frags = 1;
@ -750,33 +770,40 @@ static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,
static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
{
int ret, i, dram_alloc = 0;
struct iwl_dram_info dram_info;
int ret, i;
bool dram_alloc = false;
struct iwl_dram_data *frags =
&fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
struct iwl_dram_info *dram_info;
if (!frags || !frags->block)
return;
dram_info = frags->block;
if (!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
return;
dram_info.first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
dram_info.second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) {
ret = iwl_dbg_tlv_update_dram(fwrt, i, &dram_info);
ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
if (!ret)
dram_alloc++;
dram_alloc = true;
else
IWL_WARN(fwrt,
"WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
i, ret);
}
if (dram_alloc) {
memcpy(frags->block, &dram_info, sizeof(dram_info));
IWL_DEBUG_FW(fwrt, "block data after %016x\n",
*((int *)fwrt->trans->dbg.fw_mon_ini[1].frags[0].block));
}
if (dram_alloc)
IWL_DEBUG_FW(fwrt, "block data after %08x\n",
dram_info->first_word);
else
memset(frags->block, 0, sizeof(*dram_info));
}
static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
@ -799,11 +826,11 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
}
static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
struct list_head *config_list)
struct list_head *conf_list)
{
struct iwl_dbg_tlv_node *node;
list_for_each_entry(node, config_list, list) {
list_for_each_entry(node, conf_list, list) {
struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
u32 count, address, value;
u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
@ -849,11 +876,18 @@ static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
struct iwl_dbgc1_info dram_info = {};
struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
__le64 dram_base_addr = cpu_to_le64(frags->physical);
__le32 dram_size = cpu_to_le32(frags->size);
u64 dram_addr = le64_to_cpu(dram_base_addr);
__le64 dram_base_addr;
__le32 dram_size;
u64 dram_addr;
u32 ret;
if (!frags)
break;
dram_base_addr = cpu_to_le64(frags->physical);
dram_size = cpu_to_le32(frags->size);
dram_addr = le64_to_cpu(dram_base_addr);
IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
dram_base_addr, dram_size);
IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",
@ -1159,6 +1193,8 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
data);
int ret, i;
u32 tp = le32_to_cpu(dump_data.trig->time_point);
if (!num_data) {
ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
@ -1177,8 +1213,42 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
break;
}
}
}
fwrt->trans->dbg.restart_required = FALSE;
IWL_DEBUG_INFO(fwrt, "WRT: tp %d, reset_fw %d\n",
tp, dump_data.trig->reset_fw);
IWL_DEBUG_INFO(fwrt, "WRT: restart_required %d, last_tp_resetfw %d\n",
fwrt->trans->dbg.restart_required,
fwrt->trans->dbg.last_tp_resetfw);
if (fwrt->trans->trans_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
fwrt->trans->dbg.restart_required = TRUE;
} else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
fwrt->trans->dbg.last_tp_resetfw ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
fwrt->trans->dbg.restart_required = FALSE;
fwrt->trans->dbg.last_tp_resetfw = 0xFF;
IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
IWL_DEBUG_INFO(fwrt, "WRT: stop and reload firmware\n");
fwrt->trans->dbg.restart_required = TRUE;
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
IWL_DEBUG_INFO(fwrt, "WRT: stop only and no reload firmware\n");
fwrt->trans->dbg.restart_required = FALSE;
fwrt->trans->dbg.last_tp_resetfw =
le32_to_cpu(dump_data.trig->reset_fw);
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_NOTHING) {
IWL_DEBUG_INFO(fwrt,
"WRT: nothing need to be done after debug collection\n");
} else {
IWL_ERR(fwrt, "WRT: wrong resetfw %d\n",
le32_to_cpu(dump_data.trig->reset_fw));
}
}
return 0;
}
@ -1244,7 +1314,7 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
}
reg = (void *)(*active_reg)->data;
reg_type = le32_to_cpu(reg->type);
reg_type = reg->type;
if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER ||
!(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc))

View File

@ -10,7 +10,9 @@
#endif
#include "iwl-drv.h"
#include "iwl-debug.h"
#if defined(__FreeBSD__)
#include "iwl-modparams.h"
#endif
#include "iwl-devtrace.h"
#if defined(__FreeBSD__)
@ -141,7 +143,7 @@ void __iwl_dbg(struct device *dev,
va_start(args, fmt);
vaf.va = &args;
#if defined(CONFIG_IWLWIFI_DEBUG)
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_have_debug_level(level) &&
(!limit || net_ratelimit())) {
#if defined(__linux__)

View File

@ -141,6 +141,9 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
/* clear the data for the aborted load case */
memset(&drv->fw, 0, sizeof(drv->fw));
}
static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
@ -174,8 +177,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
char tag[8];
if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
(CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP &&
CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_C_STEP)) {
(drv->trans->hw_rev_step != SILICON_B_STEP &&
drv->trans->hw_rev_step != SILICON_C_STEP)) {
IWL_ERR(drv,
"Only HW steps B and C are currently supported (0x%0x)\n",
drv->trans->hw_rev);
@ -598,6 +601,66 @@ static void iwl_drv_set_dump_exclude(struct iwl_drv *drv,
excl->size = le32_to_cpu(fw->size);
}
static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv,
const struct iwl_ucode_tlv *tlv)
{
const struct iwl_fw_ini_region_tlv *region;
u32 length = le32_to_cpu(tlv->length);
u32 addr;
if (length < offsetof(typeof(*region), special_mem) +
sizeof(region->special_mem))
return;
region = (const void *)tlv->data;
addr = le32_to_cpu(region->special_mem.base_addr);
addr += le32_to_cpu(region->special_mem.offset);
addr &= ~FW_ADDR_CACHE_CONTROL;
if (region->type != IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY)
return;
switch (region->sub_type) {
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE:
drv->trans->dbg.umac_error_event_table = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_UMAC;
break;
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE:
drv->trans->dbg.lmac_error_event_table[0] = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_LMAC1;
break;
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE:
drv->trans->dbg.lmac_error_event_table[1] = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_LMAC2;
break;
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE:
drv->trans->dbg.tcm_error_event_table[0] = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_TCM1;
break;
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE:
drv->trans->dbg.tcm_error_event_table[1] = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_TCM2;
break;
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE:
drv->trans->dbg.rcm_error_event_table[0] = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_RCM1;
break;
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE:
drv->trans->dbg.rcm_error_event_table[1] = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_RCM2;
break;
default:
break;
}
}
static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces,
@ -653,8 +716,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
while (len >= sizeof(*tlv)) {
len -= sizeof(*tlv);
tlv = (const struct iwl_ucode_tlv *)data;
tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
tlv_data = tlv->data;
@ -885,7 +948,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_FW_VERSION: {
const __le32 *ptr = (const __le32 *)tlv_data;
const __le32 *ptr = (const void *)tlv_data;
u32 major, minor;
u8 local_comp;
@ -1166,22 +1229,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
IWL_ERROR_EVENT_TABLE_LMAC1;
break;
}
case IWL_UCODE_TLV_TCM_DEBUG_ADDRS: {
const struct iwl_fw_tcm_error_addr *ptr =
(const void *)tlv_data;
if (tlv_len != sizeof(*ptr))
goto invalid_tlv_len;
drv->trans->dbg.tcm_error_event_table =
le32_to_cpu(ptr->addr) & ~FW_ADDR_CACHE_CONTROL;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_TCM;
break;
}
case IWL_UCODE_TLV_TYPE_REGIONS:
iwl_parse_dbg_tlv_assert_tables(drv, tlv);
fallthrough;
case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
case IWL_UCODE_TLV_TYPE_HCMD:
case IWL_UCODE_TLV_TYPE_REGIONS:
case IWL_UCODE_TLV_TYPE_TRIGGERS:
case IWL_UCODE_TLV_TYPE_CONF_SET:
if (iwlwifi_mod_params.enable_ini)
@ -1237,8 +1290,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (len) {
IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
#if defined(__linux__)
iwl_print_hex_dump(drv, IWL_DL_FW, data, len);
#elif defined(__FreeBSD__)
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_print_hex_dump(drv, IWL_DL_FW, "TLV ", (u8 *)data, len);
iwl_print_hex_dump(drv, IWL_DL_FW, "TLV ", data, len);
#endif
#endif
return -EINVAL;
}
@ -1248,8 +1305,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
invalid_tlv_len:
IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
tlv_error:
#if defined(__linux__)
iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len);
#elif defined(__FreeBSD__)
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_print_hex_dump(drv, IWL_DL_FW, "TLV ", tlv_data, tlv_len);
#endif
#endif
return -EINVAL;
@ -1331,23 +1392,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
const struct iwl_op_mode_ops *ops = op->ops;
struct dentry *dbgfs_dir = NULL;
struct iwl_op_mode *op_mode = NULL;
int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
for (retry = 0; retry <= max_retry; retry++) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
drv->dbgfs_op_mode = debugfs_create_dir(op->name,
drv->dbgfs_drv);
dbgfs_dir = drv->dbgfs_op_mode;
drv->dbgfs_op_mode = debugfs_create_dir(op->name,
drv->dbgfs_drv);
dbgfs_dir = drv->dbgfs_op_mode;
#endif
op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
op_mode = ops->start(drv->trans, drv->trans->cfg,
&drv->fw, dbgfs_dir);
if (op_mode)
return op_mode;
IWL_ERR(drv, "retry init count %d\n", retry);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (!op_mode) {
debugfs_remove_recursive(drv->dbgfs_op_mode);
drv->dbgfs_op_mode = NULL;
}
#endif
}
return op_mode;
return NULL;
}
static void _iwl_op_mode_stop(struct iwl_drv *drv)
@ -1385,6 +1454,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
int i;
bool load_module = false;
bool usniffer_images = false;
bool failure = true;
fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
fw->ucode_capa.standard_phy_calibration_size =
@ -1600,6 +1670,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow the OS to release its copies */
release_firmware(ucode_raw);
iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
mutex_lock(&iwlwifi_opmode_table_mtx);
switch (fw->type) {
case IWL_FW_DVM:
@ -1616,8 +1688,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);
iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
/* add this device to the list of devices using this op_mode */
list_add_tail(&drv->list, &op->drv);
@ -1645,15 +1715,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* else from proceeding if the module fails to load
* or hangs loading.
*/
if (load_module) {
if (load_module)
request_module("%s", op->name);
#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
if (err)
IWL_ERR(drv,
"failed to load module %s (error %d), is dynamic loading enabled?\n",
op->name, err);
#endif
}
failure = false;
goto free;
try_again:
@ -1669,6 +1733,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
free:
if (failure)
iwl_dealloc_ucode(drv);
if (pieces) {
for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
kfree(pieces->img[i].sec);

View File

@ -84,9 +84,12 @@ void iwl_drv_stop(struct iwl_drv *drv);
* everything is built-in, then we can avoid that.
*/
#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym)
#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_NS_GPL(sym, IWLWIFI)
#else
#define IWL_EXPORT_SYMBOL(sym)
#endif
/* max retry for init flow */
#define IWL_MAX_INIT_RETRY 2
#endif /* __iwl_drv_h__ */
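EXPORT_SYMBOL_NS_GPL() puts the exported symbols into the IWLWIFI symbol namespace, so a separately built consumer must import that namespace before the module loader will resolve them; with CONFIG_IWLWIFI_OPMODE_MODULAR unset the macro expands to nothing and no symbols are exported at all. A sketch of the consumer side (hypothetical module, not part of this change):

#include <linux/module.h>

/* Needed once per module that uses IWLWIFI-namespaced exports,
 * e.g. iwl_poll_bit(). */
MODULE_IMPORT_NS(IWLWIFI);

MODULE_LICENSE("GPL");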

View File

@ -26,26 +26,22 @@
*/
#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
/*
* The device's EEPROM semaphore prevents conflicts between driver and uCode
* when accessing the EEPROM; each access is a series of pulses to/from the
* EEPROM chip, not a single event, so even reads could conflict if they
* weren't arbitrated by the semaphore.
*/
#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
{
u16 count;
int ret;
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
@ -54,7 +50,7 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
EEPROM_SEM_TIMEOUT);
IWL_EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
IWL_DEBUG_EEPROM(trans->dev,
"Acquired semaphore after %d tries.\n",

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fh_h__
@ -580,7 +580,7 @@ struct iwl_rb_status {
__le16 closed_fr_num;
__le16 finished_rb_num;
__le16 finished_fr_nam;
__le32 spare;
__le32 __spare;
} __packed;
@ -590,11 +590,31 @@ struct iwl_rb_status {
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define TFD_QUEUE_BC_SIZE_GEN3 1024
#define TFD_QUEUE_BC_SIZE_GEN3_AX210 1024
#define TFD_QUEUE_BC_SIZE_GEN3_BZ (1024 * 4)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
/* IMR DMA registers */
#define IMR_TFH_SRV_DMA_CHNL0_CTRL 0x00a0a51c
#define IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR 0x00a0a520
#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB 0x00a0a524
#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB 0x00a0a528
#define IMR_TFH_SRV_DMA_CHNL0_BC 0x00a0a52c
#define TFH_SRV_DMA_CHNL0_LEFT_BC 0x00a0a530
/* RFH S2D DMA registers */
#define IMR_RFH_GEN_CFG_SERVICE_DMA_RS_MSK 0x0000000c
#define IMR_RFH_GEN_CFG_SERVICE_DMA_SNOOP_MSK 0x00000002
/* TFH D2S DMA registers */
#define IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK 0x80000000
#define IMR_UREG_CHICK 0x00d05c00
#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS 0x00800000
#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK 0x00000030
#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS 0x80000000
static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
@ -707,14 +727,14 @@ struct iwlagn_scd_bc_tbl {
} __packed;
/**
* struct iwl_gen3_bc_tbl scheduler byte count table gen3
* struct iwl_gen3_bc_tbl_entry scheduler byte count table entry gen3
* For AX210 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
struct iwl_gen3_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
struct iwl_gen3_bc_tbl_entry {
__le16 tfd_offset;
} __packed;
#endif /* !__iwl_fh_h__ */
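Replacing the whole-table struct with a single-entry struct lets the transport size the byte-count table per device family instead of baking one array length into the type. The sizing arithmetic, as a standalone sketch using the constants above:

#include <stdint.h>
#include <stdio.h>

struct iwl_gen3_bc_tbl_entry {
	uint16_t tfd_offset;       /* __le16 in the driver */
} __attribute__((packed));

#define TFD_QUEUE_BC_SIZE_GEN3_AX210 1024
#define TFD_QUEUE_BC_SIZE_GEN3_BZ    (1024 * 4)

int main(void)
{
	size_t ax210 = sizeof(struct iwl_gen3_bc_tbl_entry) *
	    TFD_QUEUE_BC_SIZE_GEN3_AX210;               /* 2048 bytes */
	size_t bz = sizeof(struct iwl_gen3_bc_tbl_entry) *
	    TFD_QUEUE_BC_SIZE_GEN3_BZ;                  /* 8192 bytes */

	printf("bc_tbl_size: AX210 %zu, BZ %zu\n", ax210, bz);
	return 0;
}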

View File

@ -65,14 +65,14 @@ IWL_EXPORT_SYMBOL(iwl_poll_bit);
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
u32 value = 0x5a5a5a5a;
if (iwl_trans_grab_nic_access(trans)) {
value = iwl_read32(trans, reg);
u32 value = iwl_read32(trans, reg);
iwl_trans_release_nic_access(trans);
return value;
}
return value;
return 0x5a5a5a5a;
}
IWL_EXPORT_SYMBOL(iwl_read_direct32);
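The restructured readers keep the value in the narrowest scope and make 0x5a5a5a5a unambiguously the failure-path poison value rather than a default that happens to survive. The same shape as a sketch (the three helpers stand in for the transport hooks):

#include <stdbool.h>
#include <stdint.h>

static bool grab_nic_access(void)        { return true; }
static void release_nic_access(void)     { }
static uint32_t raw_read32(uint32_t reg) { return reg; }

static uint32_t read_direct32(uint32_t reg)
{
	if (grab_nic_access()) {
		uint32_t value = raw_read32(reg);

		release_nic_access();
		return value;
	}
	return 0x5a5a5a5a;         /* poison: NIC access was not granted */
}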
@ -135,13 +135,15 @@ IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
u32 val = 0x5a5a5a5a;
if (iwl_trans_grab_nic_access(trans)) {
val = iwl_read_prph_no_grab(trans, ofs);
u32 val = iwl_read_prph_no_grab(trans, ofs);
iwl_trans_release_nic_access(trans);
return val;
}
return val;
return 0x5a5a5a5a;
}
IWL_EXPORT_SYMBOL(iwl_read_prph);
@ -218,7 +220,7 @@ void iwl_force_nmi(struct iwl_trans *trans)
UREG_DOORBELL_TO_ISR6_NMI_BIT);
else
iwl_write32(trans, CSR_DOORBELL_VECTOR,
CSR_DOORBELL_VECTOR_NMI);
UREG_DOORBELL_TO_ISR6_NMI_BIT);
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);

View File

@ -76,7 +76,11 @@ struct iwl_mod_params {
bool power_save;
int power_level;
#ifdef CONFIG_IWLWIFI_DEBUG
#if defined(__linux__)
u32 debug_level;
#elif defined(__FreeBSD__)
enum iwl_dl debug_level;
#endif
#endif
char *nvm_file;
u32 uapsd_disable;

View File

@ -22,6 +22,7 @@
#include "fw/api/commands.h"
#include "fw/api/cmdhdr.h"
#include "fw/img.h"
#include "mei/iwl-mei.h"
/* NVM offsets (in words) definitions */
enum nvm_offsets {
@ -583,9 +584,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
.phy_cap_info[3] =
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
.phy_cap_info[4] =
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
@ -607,7 +608,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.phy_cap_info[9] =
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED,
(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED <<
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS),
.phy_cap_info[10] =
IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF,
},
@ -652,9 +654,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
.phy_cap_info[3] =
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
.phy_cap_info[6] =
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
@ -664,7 +666,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
.phy_cap_info[9] =
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED,
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED
<< IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS,
},
/*
* Set default Tx/Rx HE MCS NSS Support field.
@ -729,7 +732,7 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
IWL_DEBUG_EEPROM(trans->dev, "he_6ghz_capa=0x%x\n", he_6ghz_capa);
/* we know it's writable - we set it before ourselves */
iftype_data = (void *)sband->iftype_data;
iftype_data = (void *)(uintptr_t)sband->iftype_data;
for (i = 0; i < sband->n_iftype_data; i++)
iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa);
}
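The added (uintptr_t) round-trip is the portable way to cast away const without the compiler diagnosing a qualifier drop, which matters because sband->iftype_data is declared const but is known to point at writable storage here. The idiom in isolation:

#include <stdint.h>

struct iftype_data { uint16_t he_6ghz_capa; };

static void patch_capa(const struct iftype_data *ro, uint16_t capa)
{
	/* we know the underlying object is writable; launder the pointer */
	struct iftype_data *rw = (struct iftype_data *)(uintptr_t)ro;

	rw->he_6ghz_capa = capa;
}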
@ -781,6 +784,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
case IWL_CFG_RF_TYPE_GF:
case IWL_CFG_RF_TYPE_MR:
case IWL_CFG_RF_TYPE_MS:
iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
if (!is_ap)
@ -918,7 +922,7 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
return le16_to_cpup(nvm_sw + NVM_VERSION);
else
return le32_to_cpup((const __le32 *)(nvm_sw +
NVM_VERSION_EXT_NVM));
NVM_VERSION_EXT_NVM));
}
static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
@ -1077,10 +1081,13 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
return -EINVAL;
}
if (!trans->csme_own)
#if defined(__linux__)
IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);
IWL_INFO(trans, "base HW address: %pM, OTP minor version: 0x%x\n",
data->hw_addr, iwl_read_prph(trans, REG_OTP_MINOR));
#elif defined(__FreeBSD__)
IWL_INFO(trans, "base HW address: %6D\n", data->hw_addr, ":");
IWL_INFO(trans, "base HW address: %6D, OTP minor version: 0x%x\n",
data->hw_addr, ":", iwl_read_prph(trans, REG_OTP_MINOR));
#endif
return 0;
@ -1118,6 +1125,66 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return false;
}
struct iwl_nvm_data *
iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_mei_nvm *mei_nvm,
const struct iwl_fw *fw)
{
struct iwl_nvm_data *data;
u32 sbands_flags = 0;
u8 rx_chains = fw->valid_rx_ant;
u8 tx_chains = fw->valid_tx_ant;
if (cfg->uhb_supported)
data = kzalloc(struct_size(data, channels,
IWL_NVM_NUM_CHANNELS_UHB),
GFP_KERNEL);
else
data = kzalloc(struct_size(data, channels,
IWL_NVM_NUM_CHANNELS_EXT),
GFP_KERNEL);
if (!data)
return NULL;
BUILD_BUG_ON(ARRAY_SIZE(mei_nvm->channels) !=
IWL_NVM_NUM_CHANNELS_UHB);
data->nvm_version = mei_nvm->nvm_version;
iwl_set_radio_cfg(cfg, data, mei_nvm->radio_cfg);
if (data->valid_tx_ant)
tx_chains &= data->valid_tx_ant;
if (data->valid_rx_ant)
rx_chains &= data->valid_rx_ant;
data->sku_cap_mimo_disabled = false;
data->sku_cap_band_24ghz_enable = true;
data->sku_cap_band_52ghz_enable = true;
data->sku_cap_11n_enable =
!(iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL);
data->sku_cap_11ac_enable = true;
data->sku_cap_11ax_enable =
mei_nvm->caps & MEI_NVM_CAPS_11AX_SUPPORT;
data->lar_enabled = mei_nvm->caps & MEI_NVM_CAPS_LARI_SUPPORT;
data->n_hw_addrs = mei_nvm->n_hw_addrs;
/* If no valid mac address was found - bail out */
if (iwl_set_hw_address(trans, cfg, data, NULL, NULL)) {
kfree(data);
return NULL;
}
if (data->lar_enabled &&
fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
iwl_init_sbands(trans, data, mei_nvm->channels, tx_chains, rx_chains,
sbands_flags, true, fw);
return data;
}
IWL_EXPORT_SYMBOL(iwl_parse_mei_nvm_data);
struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw,
@ -1326,8 +1393,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
nvm_chan = iwl_nvm_channels;
}
if (WARN_ON(num_of_ch > max_num_ch))
if (num_of_ch > max_num_ch) {
IWL_DEBUG_DEV(dev, IWL_DL_LAR,
"Num of channels (%d) is greater than expected. Truncating to %d\n",
num_of_ch, max_num_ch);
num_of_ch = max_num_ch;
}
if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
return ERR_PTR(-EINVAL);
@ -1552,7 +1623,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
/* nvm file validation, dword_buff[2] holds the file version */
if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP &&
trans->hw_rev_step == SILICON_C_STEP &&
le32_to_cpu(dword_buff[2]) < 0xE4A) {
ret = -EFAULT;
goto out;

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2015, 2018-2020 Intel Corporation
* Copyright (C) 2005-2015, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_nvm_parse_h__
@ -8,6 +8,7 @@
#include <net/cfg80211.h>
#include "iwl-eeprom-parse.h"
#include "mei/iwl-mei.h"
/**
* enum iwl_nvm_sbands_flags - modification flags for the channel profiles
@ -81,4 +82,12 @@ void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
const struct iwl_fw *fw);
/**
* iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data
*/
struct iwl_nvm_data *
iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_mei_nvm *mei_nvm,
const struct iwl_fw *fw);
#endif /* __iwl_nvm_parse_h__ */
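A hedged sketch of how a caller would be expected to combine the new entry point with the existing one (hypothetical helper, for illustration only; on FreeBSD iwl_mei_get_nvm() is a stub returning NULL, so the fallback is always taken):

static struct iwl_nvm_data *
get_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
	     const struct iwl_fw *fw)
{
	struct iwl_mei_nvm *mei_nvm = iwl_mei_get_nvm();

	if (mei_nvm) {
		struct iwl_nvm_data *data =
		    iwl_parse_mei_nvm_data(trans, cfg, mei_nvm, fw);

		kfree(mei_nvm);
		return data;
	}
	return iwl_get_nvm(trans, fw);   /* read the NVM ourselves */
}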

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2005-2014, 2020 Intel Corporation
* Copyright (C) 2005-2014, 2020-2021 Intel Corporation
* Copyright (C) 2016 Intel Deutschland GmbH
*/
#include <linux/slab.h>
@ -13,8 +13,6 @@
#include "iwl-op-mode.h"
#include "iwl-trans.h"
#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
struct iwl_phy_db_entry {
u16 size;
u8 *data;

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@ -347,19 +347,17 @@
#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078
#define RFIC_REG_RD 0xAD0470
#define WFPM_CTRL_REG 0xA03030
#define WFPM_CTRL_REG_GEN2 0xd03030
#define WFPM_OTP_CFG1_ADDR 0x00a03098
#define WFPM_OTP_CFG1_ADDR_GEN2 0x00d03098
#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4)
#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5)
#define WFPM_GP2 0xA030B4
/* DBGI SRAM Register details */
#define DBGI_SRAM_TARGET_ACCESS_CFG 0x00A2E14C
#define DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK 0x10000
#define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB 0x00A2E154
#define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB 0x00A2E158
#define DBGI_SRAM_FIFO_POINTERS 0x00A2E148
#define DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK 0x00000FFF
enum {
ENABLE_WFPM = BIT(31),
@ -388,6 +386,11 @@ enum {
#define UREG_LMAC1_CURRENT_PC 0xa05c1c
#define UREG_LMAC2_CURRENT_PC 0xa05c20
#define WFPM_LMAC1_PD_NOTIFICATION 0xa0338c
#define WFPM_ARC1_PD_NOTIFICATION 0xa03044
#define HPM_SECONDARY_DEVICE_STATE 0xa03404
/* For UMAG_GEN_HW_STATUS reg check */
enum {
UMAG_GEN_HW_IS_FPGA = BIT(1),
@ -455,6 +458,13 @@ enum {
#define UREG_DOORBELL_TO_ISR6_RESUME BIT(19)
#define UREG_DOORBELL_TO_ISR6_PNVM BIT(20)
/*
* From the BZ family on, the driver triggers this bit for suspend and
* resume. The driver should update CSR_IPC_SLEEP_CONTROL with the
* suspend/resume value before triggering this interrupt.
*/
#define UREG_DOORBELL_TO_ISR6_SLEEP_CTRL BIT(31)
#define CNVI_MBOX_C 0xA3400C
#define FSEQ_ERROR_CODE 0xA340C8
@ -486,4 +496,6 @@ enum {
#define HBUS_TIMEOUT 0xA5A5A5A1
#define WFPM_DPHY_OFF 0xDF10FF
#define REG_OTP_MINOR 0xA0333C
#endif /* __iwl_prph_h__ */

View File

@ -78,8 +78,12 @@ int iwl_trans_init(struct iwl_trans *trans)
if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
return -EINVAL;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
trans->txqs.bc_tbl_size =
sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans->txqs.bc_tbl_size =
sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
else
trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
/*

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -197,7 +197,10 @@ enum iwl_error_event_table_status {
IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
IWL_ERROR_EVENT_TABLE_TCM = BIT(3),
IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
};
/**
@ -309,6 +312,8 @@ enum iwl_d3_status {
* are sent
* @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
* @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
* e.g. for testing
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@ -321,6 +326,7 @@ enum iwl_trans_status {
STATUS_TRANS_GOING_IDLE,
STATUS_TRANS_IDLE,
STATUS_TRANS_DEAD,
STATUS_SUPPRESS_CMD_ERROR_ONCE,
};
static inline int
@ -413,6 +419,9 @@ struct iwl_dump_sanitize_ops {
* @cb_data_offs: offset inside skb->cb to store transport data at, must have
* space for at least two pointers
* @fw_reset_handshake: firmware supports reset flow handshake
* @queue_alloc_cmd_ver: queue allocation command version, set to 0
* for using the older SCD_QUEUE_CFG, set to the version of
* SCD_QUEUE_CONFIG_CMD otherwise.
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
@ -431,6 +440,7 @@ struct iwl_trans_config {
u8 cb_data_offs;
bool fw_reset_handshake;
u8 queue_alloc_cmd_ver;
};
struct iwl_trans_dump_data {
@ -576,10 +586,9 @@ struct iwl_trans_ops {
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
/* 22000 functions */
int (*txq_alloc)(struct iwl_trans *trans,
__le16 flags, u8 sta_id, u8 tid,
int cmd_id, int size,
unsigned int queue_wdg_timeout);
int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
u32 sta_mask, u8 tid,
int size, unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data);
@ -606,7 +615,7 @@ struct iwl_trans_ops {
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
void (*sw_reset)(struct iwl_trans *trans);
int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
bool (*grab_nic_access)(struct iwl_trans *trans);
void (*release_nic_access)(struct iwl_trans *trans);
void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
@ -622,6 +631,10 @@ struct iwl_trans_ops {
int (*set_reduce_power)(struct iwl_trans *trans,
const void *data, u32 len);
void (*interrupts)(struct iwl_trans *trans, bool enable);
int (*imr_dma_data)(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr,
u32 byte_cnt);
};
/**
@ -728,6 +741,26 @@ struct iwl_self_init_dram {
int paging_cnt;
};
/**
* struct iwl_imr_data - imr dram data used during debug process
* @imr_enable: imr enable status received from fw
* @imr_size: imr dram size received from fw
* @sram_addr: sram address from debug tlv
* @sram_size: sram size from debug tlv
* @imr2sram_remainbyte: bytes remaining after each DMA transfer
* @imr_curr_addr: current dst address used during dma transfer
* @imr_base_addr: imr address received from fw
*/
struct iwl_imr_data {
u32 imr_enable;
u32 imr_size;
u32 sram_addr;
u32 sram_size;
u32 imr2sram_remainbyte;
u64 imr_curr_addr;
__le64 imr_base_addr;
};
/**
* struct iwl_trans_debug - transport debug related data
*
@ -738,7 +771,8 @@ struct iwl_self_init_dram {
* @trigger_tlv: array of pointers to triggers TLVs for debug
* @lmac_error_event_table: addrs of lmacs error tables
* @umac_error_event_table: addr of umac error table
* @tcm_error_event_table: address of TCM error table
* @tcm_error_event_table: address(es) of TCM error table(s)
* @rcm_error_event_table: address(es) of RCM error table(s)
* @error_event_table_tlv_status: bitmap that indicates what error table
* pointers were received via TLV. Uses enum &iwl_error_event_table_status
* @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
@ -765,7 +799,8 @@ struct iwl_trans_debug {
u32 lmac_error_event_table[2];
u32 umac_error_event_table;
u32 tcm_error_event_table;
u32 tcm_error_event_table[2];
u32 rcm_error_event_table[2];
unsigned int error_event_table_tlv_status;
enum iwl_ini_cfg_state internal_ini_cfg;
@ -788,6 +823,9 @@ struct iwl_trans_debug {
u32 domains_bitmap;
u32 ucode_preset;
bool restart_required;
u32 last_tp_resetfw;
struct iwl_imr_data imr_data;
};
struct iwl_dma_ptr {
@ -907,6 +945,7 @@ struct iwl_txq {
* @queue_used - bit mask of used queues
* @queue_stopped - bit mask of stopped queues
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
* @queue_alloc_cmd_ver: queue allocation command version
*/
struct iwl_trans_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
@ -932,11 +971,14 @@ struct iwl_trans_txqs {
} tfd;
struct iwl_dma_ptr scd_bc_tbls;
u8 queue_alloc_cmd_ver;
};
/**
* struct iwl_trans - transport common data
*
* @csme_own - true if we couldn't get ownership on the device
* @ops - pointer to iwl_trans_ops
* @op_mode - pointer to the op_mode
* @trans_cfg: the trans-specific configuration part
@ -950,6 +992,7 @@ struct iwl_trans_txqs {
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @hw_rev_step: The mac step of the HW
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
* @wide_cmd_header: true when ucode supports wide command header format
@ -971,6 +1014,7 @@ struct iwl_trans_txqs {
* @iwl_trans_txqs: transport tx queues data.
*/
struct iwl_trans {
bool csme_own;
const struct iwl_trans_ops *ops;
struct iwl_op_mode *op_mode;
const struct iwl_cfg_trans_params *trans_cfg;
@ -982,6 +1026,7 @@ struct iwl_trans {
struct device *dev;
u32 max_skb_frags;
u32 hw_rev;
u32 hw_rev_step;
u32 hw_rf_id;
u32 hw_id;
char hw_id_str[52];
@ -1219,9 +1264,8 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue)
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
__le16 flags, u8 sta_id, u8 tid,
int cmd_id, int size,
unsigned int wdg_timeout)
u32 flags, u32 sta_mask, u8 tid,
int size, unsigned int wdg_timeout)
{
might_sleep();
@ -1233,8 +1277,8 @@ iwl_trans_txq_alloc(struct iwl_trans *trans,
return -EIO;
}
return trans->ops->txq_alloc(trans, flags, sta_id, tid,
cmd_id, size, wdg_timeout);
return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
size, wdg_timeout);
}
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
@ -1367,6 +1411,15 @@ static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
} while (0)
static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr,
u32 byte_cnt)
{
if (trans->ops->imr_dma_data)
return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
return 0;
}
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
u32 value;
@ -1395,10 +1448,12 @@ static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
trans->ops->set_pmi(trans, state);
}
static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
bool retake_ownership)
{
if (trans->ops->sw_reset)
trans->ops->sw_reset(trans);
return trans->ops->sw_reset(trans, retake_ownership);
return 0;
}
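sw_reset can now fail, e.g. when it cannot re-take device ownership after the reset, so callers are expected to propagate the error. A hedged usage sketch (hypothetical caller):

static int reset_and_restart(struct iwl_trans *trans)
{
	int ret = iwl_trans_sw_reset(trans, true /* retake_ownership */);

	if (ret)
		return ret;        /* ownership was not re-acquired; bail */
	/* ... continue the restart flow ... */
	return 0;
}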
static inline void

View File

@ -0,0 +1,138 @@
/*-
* Copyright (c) 2022 The FreeBSD Foundation
*
* This software was developed by Björn Zeeb under sponsorship from
* the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IWL_MEI_IWL_MEI_H
#define _IWL_MEI_IWL_MEI_H
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
enum mei_nvm_caps {
MEI_NVM_CAPS_11AX_SUPPORT = BIT(0),
MEI_NVM_CAPS_LARI_SUPPORT = BIT(1),
};
struct iwl_mei_nvm {
uint8_t n_hw_addrs;
enum mei_nvm_caps caps;
uint32_t nvm_version;
uint32_t radio_cfg;
uint32_t channels[110 /* IWL_NVM_NUM_CHANNELS_UHB */];
};
struct iwl_mei_conn_info {
int __dummy;
};
struct iwl_mei_ops {
void (*me_conn_status)(void *, const struct iwl_mei_conn_info *);
void (*nic_stolen)(void *);
void (*rfkill)(void *, bool);
void (*roaming_forbidden)(void *, bool);
void (*sap_connected)(void *);
};
#if IS_ENABLED(CONFIG_IWLMEI)
#error No MEI support in FreeBSD currently
#else
static __inline void
iwl_mei_device_down(void)
{
}
static __inline struct iwl_mei_nvm *
iwl_mei_get_nvm(void)
{
return (NULL);
}
static __inline int
iwl_mei_get_ownership(void)
{
return (0);
}
static __inline void
iwl_mei_host_disassociated(void)
{
}
static __inline bool
iwl_mei_is_connected(void)
{
return (false);
}
static __inline void
iwl_mei_set_country_code(uint16_t mcc __unused)
{
}
static __inline void
iwl_mei_set_netdev(struct net_device *netdevice __unused)
{
}
static __inline void
iwl_mei_set_nic_info(const uint8_t *addr __unused, const uint8_t *hw_addr __unused)
{
}
static __inline void
iwl_mei_set_rfkill_state(bool rf_killed __unused, bool sw_rfkill __unused)
{
}
static __inline void
iwl_mei_tx_copy_to_csme(struct sk_buff *skb __unused, unsigned int ivlen __unused)
{
}
static __inline int
iwl_mei_register(void *mvm __unused, const struct iwl_mei_ops *ops __unused)
{
return (0);
}
static __inline void
iwl_mei_start_unregister(void)
{
}
static __inline void
iwl_mei_unregister_complete(void)
{
}
#endif
#endif /* _IWL_MEI_IWL_MEI_H */
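With CONFIG_IWLMEI unset (always, on FreeBSD) the inline stubs let shared driver code call the MEI API unconditionally, and the compiler folds the calls away. For example (hypothetical caller in shared code):

static bool rfkill_blocked_by_csme(void)
{
	/* The stub returns false, so this whole helper folds to "false". */
	return iwl_mei_is_connected();
}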

View File

@ -107,7 +107,7 @@
#define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT true
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20
#define IWL_MVM_USE_NSSN_SYNC 0
#define IWL_MVM_PHY_FILTER_CHAIN_A 0
#define IWL_MVM_PHY_FILTER_CHAIN_B 0

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -346,8 +346,8 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
break;
case NL80211_CHAN_WIDTH_160:
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
TOF_RANGE_REQ_CMD,
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver >= 13) {
@ -511,7 +511,7 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
if (sta->mfp)
if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
FTM_PUT_FLAG(PMF);
rcu_read_unlock();
@ -548,7 +548,7 @@ static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v5 cmd_v5;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd_v5,
.len[0] = sizeof(cmd_v5),
@ -574,7 +574,7 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v7 cmd_v7;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd_v7,
.len[0] = sizeof(cmd_v7),
@ -604,7 +604,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v8 cmd;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@ -630,7 +630,7 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v9 cmd;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@ -728,7 +728,7 @@ static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
{
struct iwl_tof_range_req_cmd_v11 cmd;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@ -799,7 +799,7 @@ static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
{
struct iwl_tof_range_req_cmd_v12 cmd;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@ -827,7 +827,7 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
{
struct iwl_tof_range_req_cmd_v13 cmd;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@ -877,8 +877,8 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
if (new_api) {
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
TOF_RANGE_REQ_CMD,
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
IWL_FW_CMD_VER_UNKNOWN);
switch (cmd_ver) {
@ -927,8 +927,7 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
iwl_mvm_ftm_reset(mvm);
if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
LOCATION_GROUP, 0),
if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
0, sizeof(cmd), &cmd))
IWL_ERR(mvm, "failed to abort FTM process\n");
}
@ -1066,7 +1065,7 @@ static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;
rtt_avg = (alpha * rtt + (100 - alpha) * resp->rtt_avg) / 100;
rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);
IWL_DEBUG_INFO(mvm,
"%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",

View File

@ -106,6 +106,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
/*
* The command structure is the same for versions 6, 7 and 8 (only the
@ -120,8 +121,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
.sta_id = mvmvif->bcast_sta.sta_id,
};
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
TOF_RESPONDER_CONFIG_CMD, 6);
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 6);
int err;
int cmd_size;
@ -161,9 +161,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
memcpy(cmd.bssid, vif->addr, ETH_ALEN);
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD,
LOCATION_GROUP, 0),
0, cmd_size, &cmd);
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
}
static int
@ -177,8 +175,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm,
};
u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0};
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
LOCATION_GROUP, 0),
.id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
.data[1] = &data,
@ -220,8 +217,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm,
{
struct iwl_tof_responder_dyn_config_cmd cmd;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
LOCATION_GROUP, 0),
.id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
/* may not be able to DMA from stack */
@ -278,8 +274,9 @@ iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
struct ieee80211_ftm_responder_params *params)
{
int ret;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
TOF_RESPONDER_DYN_CONFIG_CMD, 2);
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
2);
switch (cmd_ver) {
case 2:
@ -320,8 +317,9 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
.addr = addr,
.hltk = hltk,
};
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
TOF_RESPONDER_DYN_CONFIG_CMD, 2);
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
2);
lockdep_assert_held(&mvm->mutex);

View File

@ -15,7 +15,7 @@
#include "fw/api/datapath.h"
#include "fw/api/phy.h"
#include "fw/api/config.h"
#include "fw/api/soc.h"
#include "fw/api/system.h"
#include "fw/api/alive.h"
#include "fw/api/binding.h"
#include "fw/api/cmdhdr.h"

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -12,8 +12,6 @@
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "fw/acpi.h"
#include "fw/pnvm.h"
@ -27,11 +25,12 @@
#define MVM_UCODE_ALIVE_TIMEOUT (HZ)
#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
#define UCODE_VALID_OK cpu_to_le32(0x1)
#define IWL_PPAG_MASK 3
#define IWL_PPAG_ETSI_MASK BIT(0)
#define IWL_TAS_US_MCC 0x5553
#define IWL_TAS_CANADA_MCC 0x4341
struct iwl_mvm_alive_data {
bool valid;
u32 scd_base_addr;
@ -78,7 +77,7 @@ static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
struct iwl_dqa_enable_cmd dqa_cmd = {
.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
};
u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD);
int ret;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
@ -123,13 +122,33 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_lmac_alive *lmac2 = NULL;
u16 status;
u32 lmac_error_event_table, umac_error_table;
u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
UCODE_ALIVE_NTFY, 0);
/*
* For v5 and above, we can check the version; for older
* versions we need to check the size.
*/
if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
UCODE_ALIVE_NTFY, 0) == 5) {
if (version == 6) {
struct iwl_alive_ntf_v6 *palive;
if (pkt_len < sizeof(*palive))
return false;
palive = (void *)pkt->data;
mvm->trans->dbg.imr_data.imr_enable =
le32_to_cpu(palive->imr.enabled);
mvm->trans->dbg.imr_data.imr_size =
le32_to_cpu(palive->imr.size);
mvm->trans->dbg.imr_data.imr2sram_remainbyte =
mvm->trans->dbg.imr_data.imr_size;
mvm->trans->dbg.imr_data.imr_base_addr =
palive->imr.base_addr;
mvm->trans->dbg.imr_data.imr_curr_addr =
le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr);
IWL_DEBUG_FW(mvm, "IMR Enabled: 0x0%x size 0x0%x Address 0x%016llx\n",
mvm->trans->dbg.imr_data.imr_enable,
mvm->trans->dbg.imr_data.imr_size,
le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr));
}
if (version >= 5) {
struct iwl_alive_ntf_v5 *palive;
if (pkt_len < sizeof(*palive))
@ -246,6 +265,26 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
return false;
}
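Each alive-notification version in the hunk above is guarded by a pkt_len check before pkt->data is cast to the versioned struct, so a short or malformed notification is rejected instead of read out of bounds. The general pattern as a standalone sketch (the struct layouts are hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct alive_v5 { uint32_t status; };
struct alive_v6 { struct alive_v5 v5; uint64_t imr_base_addr; };

static bool handle_alive(const void *data, size_t pkt_len, uint32_t version)
{
	if (version == 6) {
		const struct alive_v6 *palive = data;

		if (pkt_len < sizeof(*palive))
			return false;            /* truncated notification */
		(void)palive->imr_base_addr;     /* v6-only fields now safe */
	}
	if (version >= 5) {
		const struct alive_v5 *palive = data;

		if (pkt_len < sizeof(*palive))
			return false;
		(void)palive->status;
	}
	return true;
}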
static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
enum iwl_device_family device_family = trans->trans_cfg->device_family;
if (device_family < IWL_DEVICE_FAMILY_8000)
return;
if (device_family <= IWL_DEVICE_FAMILY_9000)
IWL_ERR(mvm, "WFPM_ARC1_PD_NOTIFICATION: 0x%x\n",
iwl_read_umac_prph(trans, WFPM_ARC1_PD_NOTIFICATION));
else
IWL_ERR(mvm, "WFPM_LMAC1_PD_NOTIFICATION: 0x%x\n",
iwl_read_umac_prph(trans, WFPM_LMAC1_PD_NOTIFICATION));
IWL_ERR(mvm, "HPM_SECONDARY_DEVICE_STATE: 0x%x\n",
iwl_read_umac_prph(trans, HPM_SECONDARY_DEVICE_STATE));
}
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
@ -311,6 +350,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_read_prph(trans, SB_CPU_2_STATUS));
}
iwl_mvm_print_pd_notification(mvm);
/* LMAC/UMAC PC info */
if (trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_9000) {
@ -516,7 +557,6 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D);
}
}
#else /* CONFIG_ACPI */
static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
@ -525,8 +565,51 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
}
#endif /* CONFIG_ACPI */
#if defined(CONFIG_ACPI) && defined(CONFIG_EFI)
static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
{
u8 cmd_ver;
int ret;
struct iwl_host_cmd cmd = {
.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
SAR_OFFSET_MAPPING_TABLE_CMD),
.flags = 0,
.data[0] = &mvm->fwrt.sgom_table,
.len[0] = sizeof(mvm->fwrt.sgom_table),
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
if (!mvm->fwrt.sgom_enabled) {
IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n");
return 0;
}
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver != 2) {
IWL_DEBUG_RADIO(mvm, "command version is unsupported. version = %d\n",
cmd_ver);
return 0;
}
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret < 0)
IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret);
return ret;
}
#else
static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
{
return 0;
}
#endif
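iwl_mvm_sgom_init() uses the usual version gate: look up the command version the firmware advertises and silently skip the command when it is not the one the driver knows how to build. Reduced to a standalone sketch (lookup_ver() stands in for iwl_fw_lookup_cmd_ver(); 99 mirrors IWL_FW_CMD_VER_UNKNOWN):

#include <stdio.h>

#define CMD_VER_UNKNOWN 99         /* IWL_FW_CMD_VER_UNKNOWN in the driver */

static int lookup_ver(void)
{
	return CMD_VER_UNKNOWN;    /* placeholder */
}

static int send_sgom_table(void)
{
	int cmd_ver = lookup_ver();

	if (cmd_ver != 2) {
		printf("command version is unsupported. version = %d\n",
		    cmd_ver);
		return 0;          /* skip quietly; not a fatal error */
	}
	/* ... build and send SAR_OFFSET_MAPPING_TABLE_CMD here ... */
	return 0;
}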
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
u32 cmd_id = PHY_CONFIGURATION_CMD;
struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
struct iwl_phy_specific_cfg phy_filters = {};
@ -558,8 +641,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.calib_control.flow_trigger =
mvm->fw->default_calib[ucode_type].flow_trigger;
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
PHY_CONFIGURATION_CMD,
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 3) {
iwl_mvm_phy_filter_init(mvm, &phy_filters);
@ -571,8 +653,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.phy_cfg);
cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
sizeof(struct iwl_phy_cfg_cmd_v1);
return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
cmd_size, &phy_cfg_cmd);
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd);
}
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
@ -715,6 +796,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
struct iwl_dev_tx_power_cmd cmd = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
@ -722,8 +804,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
int ret;
u16 len = 0;
u32 n_subbands;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
REDUCE_TX_POWER_CMD,
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 6) {
@ -757,8 +838,10 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
if (ret)
return ret;
iwl_mei_set_power_limit(per_chain);
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
@ -767,9 +850,12 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
struct iwl_geo_tx_power_profiles_resp *resp;
u16 len;
int ret;
struct iwl_host_cmd cmd;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
PER_CHAIN_LIMIT_OFFSET_CMD,
struct iwl_host_cmd cmd = {
.id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
.flags = CMD_WANT_SKB,
.data = { &geo_tx_cmd },
};
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
/* the ops field is at the same spot for all versions, so set in v1 */
@ -791,12 +877,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
if (!iwl_sar_geo_support(&mvm->fwrt))
return -EOPNOTSUPP;
cmd = (struct iwl_host_cmd){
.id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
.len = { len, },
.flags = CMD_WANT_SKB,
.data = { &geo_tx_cmd },
};
cmd.len[0] = len;
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
@ -816,13 +897,14 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
union iwl_geo_tx_power_profiles_cmd cmd;
u16 len;
u32 n_bands;
u32 n_profiles;
u32 sk = 0;
int ret;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
PER_CHAIN_LIMIT_OFFSET_CMD,
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
@ -879,24 +961,28 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (ret)
return 0;
/* Only set to South Korea if the table revision is 1 */
if (mvm->fwrt.geo_rev == 1)
sk = 1;
/*
* Set the revision on versions that contain it.
* Set the table_revision to South Korea (1) or not (0). The
* element name is misleading, as it doesn't contain the table
* revision number, but whether the South Korea variation
* should be used.
* This must be done after calling iwl_sar_geo_init().
*/
if (cmd_ver == 5)
cmd.v5.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
cmd.v5.table_revision = cpu_to_le32(sk);
else if (cmd_ver == 4)
cmd.v4.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
cmd.v4.table_revision = cpu_to_le32(sk);
else if (cmd_ver == 3)
cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
cmd.v3.table_revision = cpu_to_le32(sk);
else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER))
cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
cmd.v2.table_revision = cpu_to_le32(sk);
return iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(PHY_OPS_GROUP,
PER_CHAIN_LIMIT_OFFSET_CMD),
0, len, &cmd);
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
@ -904,13 +990,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
union acpi_object *wifi_pkg, *data, *flags;
int i, j, ret, tbl_rev, num_sub_bands;
int idx = 2;
s8 *gain;
/*
* The 'flags' field is the same in v1 and in v2 so we can just
* use v1 to access it.
*/
mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0);
mvm->fwrt.ppag_flags = 0;
data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
@ -922,8 +1003,6 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev == 1 || tbl_rev == 2) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = mvm->fwrt.ppag_table.v2.gain[0];
mvm->fwrt.ppag_ver = tbl_rev;
IWL_DEBUG_RADIO(mvm,
"Reading PPAG table v2 (tbl_rev=%d)\n",
tbl_rev);
@ -943,8 +1022,6 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
goto out_free;
}
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = mvm->fwrt.ppag_table.v1.gain[0];
mvm->fwrt.ppag_ver = 0;
IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n");
goto read_table;
}
@ -952,6 +1029,7 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
goto out_free;
read_table:
mvm->fwrt.ppag_ver = tbl_rev;
flags = &wifi_pkg->package.elements[1];
if (flags->type != ACPI_TYPE_INTEGER) {
@ -959,10 +1037,9 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
goto out_free;
}
mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(flags->integer.value &
IWL_PPAG_MASK);
mvm->fwrt.ppag_flags = flags->integer.value & IWL_PPAG_MASK;
if (!mvm->fwrt.ppag_table.v1.flags) {
if (!mvm->fwrt.ppag_flags) {
ret = 0;
goto out_free;
}
@ -982,15 +1059,15 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
goto out_free;
}
gain[i * num_sub_bands + j] = ent->integer.value;
mvm->fwrt.ppag_chains[i].subbands[j] = ent->integer.value;
if ((j == 0 &&
(gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_LB ||
gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_LB)) ||
(mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
(j != 0 &&
(gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_HB ||
gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_HB))) {
mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0);
(mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
mvm->fwrt.ppag_flags = 0;
ret = -EINVAL;
goto out_free;
}
@ -1005,6 +1082,7 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
union iwl_ppag_table_cmd cmd;
u8 cmd_ver;
int i, j, ret, num_sub_bands, cmd_size;
s8 *gain;
@ -1014,37 +1092,39 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
"PPAG capability not supported by FW, command not sent.\n");
return 0;
}
if (!mvm->fwrt.ppag_table.v1.flags) {
if (!mvm->fwrt.ppag_flags) {
IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n");
return 0;
}
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD,
/* The 'flags' field is the same in v1 and in v2 so we can just
* use v1 to access it.
*/
cmd.v1.flags = cpu_to_le32(mvm->fwrt.ppag_flags);
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD),
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 1) {
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = mvm->fwrt.ppag_table.v1.gain[0];
cmd_size = sizeof(mvm->fwrt.ppag_table.v1);
gain = cmd.v1.gain[0];
cmd_size = sizeof(cmd.v1);
if (mvm->fwrt.ppag_ver == 1 || mvm->fwrt.ppag_ver == 2) {
IWL_DEBUG_RADIO(mvm,
"PPAG table rev is %d but FW supports v1, sending truncated table\n",
mvm->fwrt.ppag_ver);
mvm->fwrt.ppag_table.v1.flags &=
cpu_to_le32(IWL_PPAG_ETSI_MASK);
cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
}
} else if (cmd_ver == 2 || cmd_ver == 3) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = mvm->fwrt.ppag_table.v2.gain[0];
cmd_size = sizeof(mvm->fwrt.ppag_table.v2);
gain = cmd.v2.gain[0];
cmd_size = sizeof(cmd.v2);
if (mvm->fwrt.ppag_ver == 0) {
IWL_DEBUG_RADIO(mvm,
"PPAG table is v1 but FW supports v2, sending padded table\n");
} else if (cmd_ver == 2 && mvm->fwrt.ppag_ver == 2) {
IWL_DEBUG_RADIO(mvm,
"PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
mvm->fwrt.ppag_table.v1.flags &=
cpu_to_le32(IWL_PPAG_ETSI_MASK);
cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
}
} else {
IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n");
@ -1053,6 +1133,8 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
for (j = 0; j < num_sub_bands; j++) {
gain[i * num_sub_bands + j] =
mvm->fwrt.ppag_chains[i].subbands[j];
IWL_DEBUG_RADIO(mvm,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
i, j, gain[i * num_sub_bands + j]);
@ -1061,7 +1143,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
0, cmd_size, &mvm->fwrt.ppag_table);
0, cmd_size, &cmd);
if (ret < 0)
IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
@ -1100,20 +1182,66 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling PPAG.\n",
dmi_get_system_info(DMI_SYS_VENDOR));
mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0);
mvm->fwrt.ppag_flags = 0;
return 0;
}
return iwl_mvm_ppag_send_cmd(mvm);
}
static const struct dmi_system_id dmi_tas_approved_list[] = {
{ .ident = "HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
},
},
{ .ident = "SAMSUNG",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
},
},
{ .ident = "LENOVO",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"),
},
},
{ .ident = "DELL",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
},
},
/* keep last */
{}
};
static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
{
int i;
u32 size = le32_to_cpu(*le_size);
/* Verify that there is room for another country */
if (size >= IWL_TAS_BLOCK_LIST_MAX)
return false;
for (i = 0; i < size; i++) {
if (list[i] == cpu_to_le32(mcc))
return true;
}
list[size++] = cpu_to_le32(mcc);
*le_size = cpu_to_le32(size);
return true;
}
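The helper appends an MCC only when the list has room and the entry is not already present. The same logic with plain u32s instead of __le32, as a standalone sketch:

#include <stdbool.h>
#include <stdint.h>

#define BLOCK_LIST_MAX 16          /* stands in for IWL_TAS_BLOCK_LIST_MAX */

static bool add_to_block_list(uint32_t *list, uint32_t *size, uint32_t mcc)
{
	uint32_t i;

	if (*size >= BLOCK_LIST_MAX)
		return false;      /* no room for another country */

	for (i = 0; i < *size; i++) {
		if (list[i] == mcc)
			return true;   /* already blocked; nothing to do */
	}
	list[(*size)++] = mcc;
	return true;
}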
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
int ret;
struct iwl_tas_config_cmd cmd = {};
int list_size;
union iwl_tas_config_cmd cmd = {};
int cmd_size, fw_ver;
BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) <
BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
APCI_WTAS_BLACK_LIST_MAX);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
@ -1121,7 +1249,10 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
return;
}
ret = iwl_acpi_get_tas(&mvm->fwrt, cmd.block_list_array, &list_size);
fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"TAS table invalid or unavailable. (%d)\n",
@ -1129,15 +1260,31 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
return;
}
if (list_size < 0)
if (ret == 0)
return;
/* list size if TAS enabled can only be non-negative */
cmd.block_list_size = cpu_to_le32((u32)list_size);
if (!dmi_check_system(dmi_tas_approved_list)) {
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
dmi_get_system_info(DMI_SYS_VENDOR));
if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
&cmd.v4.block_list_size,
IWL_TAS_US_MCC)) ||
(!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
&cmd.v4.block_list_size,
IWL_TAS_CANADA_MCC))) {
IWL_DEBUG_RADIO(mvm,
"Unable to add US/Canada to TAS block list, disabling TAS\n");
return;
}
}
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
TAS_CONFIG),
0, sizeof(cmd), &cmd);
/* v4 is the same size as v3, so no need to differentiate here */
cmd_size = fw_ver < 3 ?
sizeof(struct iwl_tas_config_cmd_v2) :
sizeof(struct iwl_tas_config_cmd_v3);
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
@ -1170,7 +1317,7 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
int ret;
u32 value;
struct iwl_lari_config_change_cmd_v5 cmd = {};
struct iwl_lari_config_change_cmd_v6 cmd = {};
cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
@ -1197,25 +1344,43 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
if (!ret)
cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
DSM_FUNC_FORCE_DISABLE_CHANNELS,
&iwl_guid, &value);
if (!ret)
cmd.force_disable_channels_bitmap = cpu_to_le32(value);
if (cmd.config_bitmap ||
cmd.oem_uhb_allow_bitmap ||
cmd.oem_11ax_allow_bitmap ||
cmd.oem_unii4_allow_bitmap ||
cmd.chan_state_active_bitmap) {
cmd.chan_state_active_bitmap ||
cmd.force_disable_channels_bitmap) {
size_t cmd_size;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE, 1);
if (cmd_ver == 5)
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
1);
switch (cmd_ver) {
case 6:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
break;
case 5:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
else if (cmd_ver == 4)
break;
case 4:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
else if (cmd_ver == 3)
break;
case 3:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
else if (cmd_ver == 2)
break;
case 2:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
else
break;
default:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
break;
}
IWL_DEBUG_RADIO(mvm,
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
@ -1227,8 +1392,9 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
le32_to_cpu(cmd.chan_state_active_bitmap),
cmd_ver);
IWL_DEBUG_RADIO(mvm,
"sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x\n",
le32_to_cpu(cmd.oem_uhb_allow_bitmap));
"sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
le32_to_cpu(cmd.oem_uhb_allow_bitmap),
le32_to_cpu(cmd.force_disable_channels_bitmap));
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
@ -1336,6 +1502,7 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
{
}
#endif /* CONFIG_ACPI */
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
@ -1401,7 +1568,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
if (iwl_mvm_has_unified_ucode(mvm))
return iwl_run_unified_mvm_ucode(mvm);
WARN_ON(!mvm->nvm_data);
ret = iwl_run_init_mvm_ucode(mvm);
if (ret) {
@ -1528,9 +1694,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
* internal aux station for all aux activities that don't
* require a dedicated data queue.
*/
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA,
0) < 12) {
if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
/*
* In old version the aux station uses mac id like other
* station and not lmac id
@ -1545,8 +1709,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
while (!sband && i < NUM_NL80211_BANDS)
sband = mvm->hw->wiphy->bands[i++];
if (WARN_ON_ONCE(!sband))
if (WARN_ON_ONCE(!sband)) {
ret = -ENODEV;
goto error;
}
chan = &sband->channels[0];
@ -1631,6 +1797,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
else if (ret < 0)
goto error;
ret = iwl_mvm_sgom_init(mvm);
if (ret)
goto error;
iwl_mvm_tas_init(mvm);
iwl_mvm_leds_sync(mvm);
@ -1683,9 +1853,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA,
0) < 12) {
if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
/*
* Add auxiliary station for scanning.
* Newer versions of this command implies that the fw uses
@ -1705,20 +1873,6 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
return ret;
}
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
u32 flags = le32_to_cpu(card_state_notif->flags);
IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
(flags & SW_CARD_DISABLED) ? "Kill" : "On",
(flags & CT_KILL_CARD_DISABLED) ?
"Reached" : "Not reached");
}
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{


@ -821,10 +821,7 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
{
u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
bool is_new_rate = iwl_fw_lookup_cmd_ver(fw,
LONG_GROUP,
BEACON_TEMPLATE_CMD,
0) > 10;
bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;
if (rate_idx <= IWL_FIRST_CCK_RATE)
flags |= is_new_rate ? IWL_MAC_BEACON_CCK
@ -960,8 +957,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
WARN_ON(channel == 0);
if (cfg80211_channel_is_psc(ctx->def.chan) &&
!IWL_MVM_DISABLE_AP_FILS) {
flags |= iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
BEACON_TEMPLATE_CMD,
flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD,
0) > 10 ?
IWL_MAC_BEACON_FILS :
IWL_MAC_BEACON_FILS_V1;
@ -1458,8 +1454,9 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
struct sk_buff *skb;
u8 *data;
u32 size = le32_to_cpu(sb->byte_count);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PROT_OFFLOAD_GROUP,
STORED_BEACON_NTF, 0);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF),
0);
if (size == 0)
return;
@ -1602,6 +1599,18 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
RCU_INIT_POINTER(mvm->csa_vif, NULL);
return;
case NL80211_IFTYPE_STATION:
/*
* if we don't know about an ongoing channel switch,
* make sure FW cancels it
*/
if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
CHANNEL_SWITCH_ERROR_NOTIF,
0) && !vif->csa_active) {
IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n");
iwl_mvm_cancel_channel_switch(mvm, vif, mac_id);
break;
}
iwl_mvm_csa_client_absent(mvm, vif);
cancel_delayed_work(&mvmvif->csa_work);
ieee80211_chswitch_done(vif, true);
@ -1615,6 +1624,31 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
rcu_read_unlock();
}
void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
struct ieee80211_vif *vif;
u32 id = le32_to_cpu(notif->mac_id);
u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
rcu_read_lock();
vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
if (!vif) {
rcu_read_unlock();
return;
}
IWL_DEBUG_INFO(mvm, "FW reports CSA error: mac_id=%u, csa_err_mask=%u\n",
id, csa_err_mask);
if (csa_err_mask & (CS_ERR_COUNT_ERROR |
CS_ERR_LONG_DELAY_AFTER_CS |
CS_ERR_TX_BLOCK_TIMER_EXPIRED))
ieee80211_channel_switch_disconnect(vif, true);
rcu_read_unlock();
}
void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -22,6 +22,7 @@
#include <linux/udp.h>
#endif
#include "iwl-drv.h"
#include "iwl-op-mode.h"
#include "iwl-io.h"
#include "mvm.h"
@ -198,6 +199,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
if (IS_ERR_OR_NULL(resp)) {
IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
PTR_ERR_OR_ZERO(resp));
resp = NULL;
goto out;
}
@ -219,7 +221,6 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
__le16_to_cpu(resp->cap), resp_ver);
/* Store the return source id */
src_id = resp->source_id;
kfree(resp);
if (IS_ERR_OR_NULL(regd)) {
IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
PTR_ERR_OR_ZERO(regd));
@ -231,7 +232,10 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
mvm->lar_regdom_set = true;
mvm->mcc_src = src_id;
iwl_mei_set_country_code(__le16_to_cpu(resp->mcc));
out:
kfree(resp);
return regd;
}
@ -631,8 +635,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
WOWLAN_KEK_KCK_MATERIAL,
if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
IWL_FW_CMD_VER_UNKNOWN) == 3)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
@ -645,14 +648,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
if (iwl_mvm_is_oce_supported(mvm)) {
u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
/* Old firmware also supports probe deferral and suppression */
if (scan_ver < 15)
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
}
if (mvm->nvm_data->sku_cap_11ax_enable &&
@ -714,8 +722,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->netdev_features |= mvm->cfg->features;
if (!iwl_mvm_is_csum_supported(mvm))
hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
NETIF_F_RXCSUM);
hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK;
if (mvm->cfg->vht_mu_mimo_supported)
wiphy_ext_feature_set(hw->wiphy,
@ -725,6 +732,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_PROTECTED_TWT);
iwl_mvm_vendor_cmds_register(mvm);
hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm);
hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm);
@ -1091,6 +1100,27 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
ret = iwl_mvm_mei_get_ownership(mvm);
if (ret)
return ret;
if (mvm->mei_nvm_data) {
/* We got the NIC, we can now free the MEI NVM data */
kfree(mvm->mei_nvm_data);
mvm->mei_nvm_data = NULL;
/*
* We can't free the nvm_data we allocated based on the SAP
* data because we registered to cfg80211 with the channels
* allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data
		 * just in order to be able to free it later.
* NULLify nvm_data so that we will read the NVM from the
* firmware this time.
*/
mvm->temp_nvm_data = mvm->nvm_data;
mvm->nvm_data = NULL;
}
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
/*
* Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
@ -1125,11 +1155,34 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
int retry, max_retry = 0;
mutex_lock(&mvm->mutex);
ret = __iwl_mvm_mac_start(mvm);
/* we are starting the mac not in error flow, and restart is enabled */
if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
iwlwifi_mod_params.fw_restart) {
max_retry = IWL_MAX_INIT_RETRY;
/*
* This will prevent mac80211 recovery flows to trigger during
* init failures
*/
set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
}
for (retry = 0; retry <= max_retry; retry++) {
ret = __iwl_mvm_mac_start(mvm);
if (!ret)
break;
IWL_ERR(mvm, "mac start retry %d\n", retry);
}
clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
mutex_unlock(&mvm->mutex);
iwl_mvm_mei_set_sw_rfkill_state(mvm);
return ret;
}
@ -1185,7 +1238,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* async_handlers_wk is now blocked */
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, 0) < 12)
if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12)
iwl_mvm_rm_aux_sta(mvm);
iwl_mvm_stop_device(mvm);
@ -1247,6 +1300,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
*/
flush_work(&mvm->roc_done_wk);
iwl_mvm_mei_set_sw_rfkill_state(mvm);
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);
mutex_unlock(&mvm->mutex);
@ -1275,6 +1330,7 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power)
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
int len;
struct iwl_dev_tx_power_cmd cmd = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
@ -1282,8 +1338,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
.common.pwr_restriction = cpu_to_le16(8 * tx_power),
};
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
REDUCE_TX_POWER_CMD,
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
@ -1303,7 +1358,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* all structs have the same common part, add it */
len += sizeof(cmd.common);
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
@ -1364,6 +1419,15 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
};
/*
	 * In the new flow, since the FW is in charge of the timing, if the
	 * driver has canceled the channel switch it will receive the
	 * CHANNEL_SWITCH_START_NOTIF notification from the FW and cancel it then
*/
if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
CHANNEL_SWITCH_ERROR_NOTIF, 0))
return;
IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);
mutex_lock(&mvm->mutex);
@ -1517,6 +1581,15 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvm->monitor_on = true;
iwl_mvm_vif_dbgfs_register(mvm, vif);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
!mvm->csme_vif && mvm->mei_registered) {
iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr);
iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev);
mvm->csme_vif = vif;
}
goto out_unlock;
out_unbind:
@ -1569,6 +1642,11 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
if (vif == mvm->csme_vif) {
iwl_mei_set_netdev(NULL);
mvm->csme_vif = NULL;
}
probe_data = rcu_dereference_protected(mvmvif->probe_resp_data,
lockdep_is_held(&mvm->mutex));
RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);
@ -1725,7 +1803,11 @@ static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
if (pass_all) {
cmd->pass_all = 1;
#if defined(__linux__)
return (u64)(unsigned long)cmd;
#elif defined(__FreeBSD__)
return (u64)(uintptr_t)cmd;
#endif
}
netdev_hw_addr_list_for_each(addr, mc_list) {
@ -1741,7 +1823,11 @@ static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
cmd->count++;
}
#if defined(__linux__)
return (u64)(unsigned long)cmd;
#elif defined(__FreeBSD__)
return (u64)(uintptr_t)cmd;
#endif
}
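/*
 * A note on the casts above: mac80211's prepare_multicast callback returns a
 * u64 cookie, which this driver uses to carry the command pointer through to
 * iwl_mvm_configure_filter().  The __FreeBSD__ branches convert through
 * (uintptr_t) rather than (unsigned long), as that is the portable
 * pointer-sized integer conversion; the result is the same on LP64.
 */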
static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
@ -1750,7 +1836,11 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
#if defined(__linux__)
struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
#elif defined(__FreeBSD__)
struct iwl_mcast_filter_cmd *cmd = (void *)(uintptr_t)multicast;
#endif
mutex_lock(&mvm->mutex);
@ -2021,11 +2111,108 @@ static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
return res;
}
static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit)
{
int i;
/*
* FW currently supports only nss == MAX_HE_SUPP_NSS
*
* If nss > MAX: we can ignore values we don't support
* If nss < MAX: we can set zeros in other streams
*/
if (nss > MAX_HE_SUPP_NSS) {
IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
MAX_HE_SUPP_NSS);
nss = MAX_HE_SUPP_NSS;
}
for (i = 0; i < nss; i++) {
u8 ru_index_tmp = ru_index_bitmap << 1;
u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;
u8 bw;
for (bw = 0;
bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
bw++) {
ru_index_tmp >>= 1;
if (!(ru_index_tmp & 1))
continue;
high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
}
}
}
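/*
 * Reader's note on the parsing above: ppe_pos_bit starts right after the
 * 7-bit PPE header (NSS + RU index bitmap, see the caller), and for every
 * (NSS, RU) pair two IEEE80211_PPE_THRES_INFO_PPET_SIZE-bit fields are
 * consumed in order - the first lands in pkt_ext_qam_th[i][bw][1] (high
 * threshold), the second in [0] (low threshold).
 */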
static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_he_pkt_ext_v2 *pkt_ext)
{
u8 nss = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1;
u8 *ppe = &sta->he_cap.ppe_thres[0];
u8 ru_index_bitmap =
u8_get_bits(*ppe,
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
/* Starting after PPE header */
u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit);
}
static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
u8 nominal_padding,
u32 *flags)
{
int low_th = -1;
int high_th = -1;
int i;
switch (nominal_padding) {
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
low_th = IWL_HE_PKT_EXT_NONE;
high_th = IWL_HE_PKT_EXT_NONE;
break;
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
low_th = IWL_HE_PKT_EXT_BPSK;
high_th = IWL_HE_PKT_EXT_NONE;
break;
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
low_th = IWL_HE_PKT_EXT_NONE;
high_th = IWL_HE_PKT_EXT_BPSK;
break;
}
/* Set the PPE thresholds accordingly */
if (low_th >= 0 && high_th >= 0) {
for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
u8 bw;
for (bw = 0;
bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
bw++) {
pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
}
}
*flags |= STA_CTXT_HE_PACKET_EXT;
}
}
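/*
 * The mapping above, summarized: 0us padding -> (NONE, NONE), 8us ->
 * (BPSK, NONE), 16us -> (NONE, BPSK); a reserved value leaves low_th and
 * high_th at -1, so the thresholds keep their "None" defaults and
 * STA_CTXT_HE_PACKET_EXT is not set.
 */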
static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, u8 sta_id)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = {
.sta_id = sta_id,
.tid_limit = IWL_MAX_TID_COUNT,
.bss_color = vif->bss_conf.he_bss_color.color,
@ -2033,16 +2220,39 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
.frame_time_rts_th =
cpu_to_le16(vif->bss_conf.frame_time_rts_th),
};
int size = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_MBSSID_HE) ?
sizeof(sta_ctxt_cmd) :
sizeof(struct iwl_he_sta_context_cmd_v1);
struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {};
u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD);
u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2);
int size;
struct ieee80211_sta *sta;
u32 flags;
int i;
const struct ieee80211_sta_he_cap *own_he_cap = NULL;
struct ieee80211_chanctx_conf *chanctx_conf;
const struct ieee80211_supported_band *sband;
void *cmd;
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE))
ver = 1;
switch (ver) {
case 1:
/* same layout as v2 except some data at the end */
cmd = &sta_ctxt_cmd_v2;
size = sizeof(struct iwl_he_sta_context_cmd_v1);
break;
case 2:
cmd = &sta_ctxt_cmd_v2;
size = sizeof(struct iwl_he_sta_context_cmd_v2);
break;
case 3:
cmd = &sta_ctxt_cmd;
size = sizeof(struct iwl_he_sta_context_cmd_v3);
break;
default:
IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver);
return;
}
rcu_read_lock();
@ -2107,97 +2317,25 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
* Initialize the PPE thresholds to "None" (7), as described in Table
* 9-262ac of 80211.ax/D3.0.
*/
memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE,
sizeof(sta_ctxt_cmd.pkt_ext));
/* If PPE Thresholds exist, parse them into a FW-familiar format. */
if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
u8 nss = (sta->he_cap.ppe_thres[0] &
IEEE80211_PPE_THRES_NSS_MASK) + 1;
u8 ru_index_bitmap =
(sta->he_cap.ppe_thres[0] &
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
u8 *ppe = &sta->he_cap.ppe_thres[0];
u8 ppe_pos_bit = 7; /* Starting after PPE header */
/*
* FW currently supports only nss == MAX_HE_SUPP_NSS
*
* If nss > MAX: we can ignore values we don't support
* If nss < MAX: we can set zeros in other streams
*/
if (nss > MAX_HE_SUPP_NSS) {
IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
MAX_HE_SUPP_NSS);
nss = MAX_HE_SUPP_NSS;
}
for (i = 0; i < nss; i++) {
u8 ru_index_tmp = ru_index_bitmap << 1;
u8 bw;
for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
ru_index_tmp >>= 1;
if (!(ru_index_tmp & 1))
continue;
sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
iwl_mvm_he_get_ppe_val(ppe,
ppe_pos_bit);
ppe_pos_bit +=
IEEE80211_PPE_THRES_INFO_PPET_SIZE;
sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
iwl_mvm_he_get_ppe_val(ppe,
ppe_pos_bit);
ppe_pos_bit +=
IEEE80211_PPE_THRES_INFO_PPET_SIZE;
}
}
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta,
&sta_ctxt_cmd.pkt_ext);
flags |= STA_CTXT_HE_PACKET_EXT;
} else if ((sta->he_cap.he_cap_elem.phy_cap_info[9] &
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK) !=
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) {
int low_th = -1;
int high_th = -1;
/* Take the PPE thresholds from the nominal padding info */
switch (sta->he_cap.he_cap_elem.phy_cap_info[9] &
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK) {
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
low_th = IWL_HE_PKT_EXT_NONE;
high_th = IWL_HE_PKT_EXT_NONE;
break;
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
low_th = IWL_HE_PKT_EXT_BPSK;
high_th = IWL_HE_PKT_EXT_NONE;
break;
case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
low_th = IWL_HE_PKT_EXT_NONE;
high_th = IWL_HE_PKT_EXT_BPSK;
break;
}
/* Set the PPE thresholds accordingly */
if (low_th >= 0 && high_th >= 0) {
struct iwl_he_pkt_ext *pkt_ext =
(struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext;
for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
u8 bw;
for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX;
bw++) {
pkt_ext->pkt_ext_qam_th[i][bw][0] =
low_th;
pkt_ext->pkt_ext_qam_th[i][bw][1] =
high_th;
}
}
flags |= STA_CTXT_HE_PACKET_EXT;
}
	/* PPE Thresholds don't exist - set the API PPE values
	 * according to the Common Nominal Packet Padding field. */
} else {
u8 nominal_padding =
u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext,
nominal_padding,
&flags);
}
if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
@ -2260,9 +2398,46 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
sta_ctxt_cmd.flags = cpu_to_le32(flags);
if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
DATA_PATH_GROUP, 0),
0, size, &sta_ctxt_cmd))
if (ver < 3) {
/* fields before pkt_ext */
BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) !=
offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext));
memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd,
offsetof(typeof(sta_ctxt_cmd), pkt_ext));
/* pkt_ext */
for (i = 0;
i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th);
i++) {
u8 bw;
for (bw = 0;
bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]);
bw++) {
BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) !=
sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw]));
memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw],
&sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw],
sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]));
}
}
/* fields after pkt_ext */
BUILD_BUG_ON(sizeof(sta_ctxt_cmd) -
offsetofend(typeof(sta_ctxt_cmd), pkt_ext) !=
sizeof(sta_ctxt_cmd_v2) -
offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext));
memcpy((u8 *)&sta_ctxt_cmd_v2 +
offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext),
(u8 *)&sta_ctxt_cmd +
offsetofend(typeof(sta_ctxt_cmd), pkt_ext),
sizeof(sta_ctxt_cmd) -
offsetofend(typeof(sta_ctxt_cmd), pkt_ext));
sta_ctxt_cmd_v2.reserved3 = 0;
}
if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd))
IWL_ERR(mvm, "Failed to config FW to work HE!\n");
}
@ -2401,6 +2576,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IEEE80211_SMPS_DYNAMIC);
}
} else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
iwl_mvm_mei_host_disassociated(mvm);
/*
* If update fails - SF might be running in associated
* mode while disassociated - which is forbidden.
@ -2477,11 +2653,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/*
* We received a beacon from the associated AP so
* remove the session protection.
* A firmware with the new API will remove it automatically.
*/
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
@ -3137,6 +3310,69 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,
}
}
static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvm_sta)
{
#if IS_ENABLED(CONFIG_IWLMEI)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mei_conn_info conn_info = {
.ssid_len = vif->bss_conf.ssid_len,
.channel = vif->bss_conf.chandef.chan->hw_value,
};
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
return;
if (!mvm->mei_registered)
return;
switch (mvm_sta->pairwise_cipher) {
case WLAN_CIPHER_SUITE_CCMP:
conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP;
break;
case WLAN_CIPHER_SUITE_GCMP:
conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP;
break;
case WLAN_CIPHER_SUITE_GCMP_256:
conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256;
break;
case 0:
/* open profile */
break;
default:
/* cipher not supported, don't send anything to iwlmei */
return;
}
switch (mvmvif->rekey_data.akm) {
case WLAN_AKM_SUITE_SAE & 0xff:
conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE;
break;
case WLAN_AKM_SUITE_PSK & 0xff:
conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK;
break;
case WLAN_AKM_SUITE_8021X & 0xff:
conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA;
break;
case 0:
/* open profile */
conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN;
break;
default:
/* auth method / AKM not supported */
		/* TODO: All the FT versions of these? */
return;
}
memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len);
memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN);
/* TODO: add support for collocated AP data */
iwl_mei_host_associated(&conn_info, NULL);
#endif
}
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@ -3281,12 +3517,18 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
* multicast data frames can be forwarded to the driver
*/
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
}
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
true);
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
/* once we move into assoc state, need to update rate scale to
* disable using wide bandwidth
*/
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
false);
if (!sta->tdls) {
/* Multicast data frames are no longer allowed */
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
@ -3484,12 +3726,15 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_sta *mvmsta = NULL;
struct iwl_mvm_key_pn *ptk_pn;
int keyidx = key->keyidx;
int ret, i;
u8 key_offset;
if (sta)
mvmsta = iwl_mvm_sta_from_mac80211(sta);
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
if (!mvm->trans->trans_cfg->gen2) {
@ -3590,7 +3835,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
}
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
sta && iwl_mvm_has_new_rx_api(mvm) &&
mvmsta && iwl_mvm_has_new_rx_api(mvm) &&
key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
(key->cipher == WLAN_CIPHER_SUITE_CCMP ||
key->cipher == WLAN_CIPHER_SUITE_GCMP ||
@ -3598,7 +3843,6 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
struct ieee80211_key_seq seq;
int tid, q;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
ptk_pn = kzalloc(struct_size(ptk_pn, q,
mvm->trans->num_rx_queues),
@ -3625,6 +3869,9 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
else
key_offset = STA_KEY_IDX_INVALID;
if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
mvmsta->pairwise_cipher = key->cipher;
IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
if (ret) {
@ -3665,12 +3912,11 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
}
if (sta && iwl_mvm_has_new_rx_api(mvm) &&
if (mvmsta && iwl_mvm_has_new_rx_api(mvm) &&
key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
(key->cipher == WLAN_CIPHER_SUITE_CCMP ||
key->cipher == WLAN_CIPHER_SUITE_GCMP ||
key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
ptk_pn = rcu_dereference_protected(
mvmsta->ptk_pn[keyidx],
lockdep_is_held(&mvm->mutex));
@ -3909,8 +4155,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
/* Use aux roc framework (HS20) */
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA, 0) >= 12) {
if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) {
u32 lmac_id;
lmac_id = iwl_mvm_get_lmac_id(mvm->fw,
@ -4713,6 +4958,15 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_STATION:
/*
* In the new flow FW is in charge of timing the switch so there
* is no need for all of this
*/
if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
CHANNEL_SWITCH_ERROR_NOTIF,
0))
break;
/*
* We haven't configured the firmware to be associated yet since
* we don't know the dtim period. In this case, the firmware can't
@ -4784,6 +5038,14 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
.cs_mode = chsw->block_tx,
};
/*
* In the new flow FW is in charge of timing the switch so there is no
* need for all of this
*/
if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
CHANNEL_SWITCH_ERROR_NOTIF, 0))
return;
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
return;
@ -5390,6 +5652,10 @@ static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
return iwl_mvm_tx_csum_bz(mvm, head, true) ==
iwl_mvm_tx_csum_bz(mvm, skb, true);
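	/*
	 * On Bz the checksum offload configuration is computed per frame,
	 * so two frames may only share an A-MSDU when iwl_mvm_tx_csum_bz()
	 * resolves to the same value for both of them.
	 */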
/* For now don't aggregate IPv6 in AMSDU */
if (skb->protocol != htons(ETH_P_IP))
return false;


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -32,6 +32,7 @@
#include "fw/runtime.h"
#include "fw/dbg.h"
#include "fw/acpi.h"
#include "mei/iwl-mei.h"
#include "iwl-nvm-parse.h"
#include <linux/average.h>
@ -98,11 +99,10 @@ struct iwl_mvm_phy_ctxt {
enum nl80211_chan_width width;
/*
* TODO: This should probably be removed. Currently here only for rate
* scaling algorithm
*/
struct ieee80211_channel *channel;
/* track for RLC config command */
u32 center_freq1;
};
struct iwl_mvm_time_event_data {
@ -838,6 +838,18 @@ struct iwl_mvm {
const char *nvm_file_name;
struct iwl_nvm_data *nvm_data;
struct iwl_mei_nvm *mei_nvm_data;
struct iwl_mvm_csme_conn_info __rcu *csme_conn_info;
bool mei_rfkill_blocked;
bool mei_registered;
struct work_struct sap_connected_wk;
/*
* NVM built based on the SAP data but that we can't free even after
	 * we get ownership because it contains cfg80211's channels.
*/
struct iwl_nvm_data *temp_nvm_data;
/* NVM sections */
struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
@ -1029,6 +1041,8 @@ struct iwl_mvm {
/* Indicate if 32Khz external clock is valid */
u32 ext_clock_valid;
/* This vif used by CSME to send / receive traffic */
struct ieee80211_vif *csme_vif;
struct ieee80211_vif __rcu *csa_vif;
struct ieee80211_vif __rcu *csa_tx_blocked_vif;
u8 csa_tx_block_bcn_timeout;
@ -1091,7 +1105,6 @@ struct iwl_mvm {
} cmd_ver;
struct ieee80211_vif *nan_vif;
#define IWL_MAX_BAID 32
struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
/*
@ -1111,6 +1124,8 @@ struct iwl_mvm {
unsigned long last_6ghz_passive_scan_jiffies;
unsigned long last_reset_or_resume_time_jiffies;
bool sta_remove_requires_queue_remove;
};
/* Extract MVM priv from op_mode and _hw */
@ -1131,6 +1146,10 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
* @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
* @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log
* if this is set, when intentionally triggered
* @IWL_MVM_STATUS_STARTING: starting mac,
* used to disable restart flow while in STARTING state
*/
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
@ -1142,6 +1161,13 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_NEED_FLUSH_P2P,
IWL_MVM_STATUS_IN_D3,
IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
IWL_MVM_STATUS_STARTING,
};
struct iwl_mvm_csme_conn_info {
struct rcu_head rcu_head;
struct iwl_mei_conn_info conn_info;
};
/* Keep track of completed init configuration */
@ -1501,6 +1527,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
unsigned int tid);
u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu);
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status);
@ -1606,8 +1633,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
@ -1668,6 +1693,8 @@ void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@ -1929,10 +1956,6 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
void iwl_mvm_stop_device(struct iwl_mvm *mvm);
/* Re-configure the SCD for a queue that has already been configured */
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn);
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
@ -1947,6 +1970,17 @@ void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm);
int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm);
int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget);
#if IS_ENABLED(CONFIG_IWLMEI)
/* vendor commands */
void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm);
#else
static inline void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) {}
#endif
/* Location Aware Regulatory */
struct iwl_mcc_update_resp *
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
@ -2071,6 +2105,8 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
struct iwl_rfi_lut_entry *rfi_table);
struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
{
@ -2145,8 +2181,7 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
{
u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
u8 ver = iwl_fw_lookup_cmd_ver(fw, SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
IWL_FW_CMD_VER_UNKNOWN);
return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
@ -2166,4 +2201,47 @@ enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher)
return IWL_LOCATION_CIPHER_INVALID;
}
}
struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm);
static inline int iwl_mvm_mei_get_ownership(struct iwl_mvm *mvm)
{
if (mvm->mei_registered)
return iwl_mei_get_ownership();
return 0;
}
static inline void iwl_mvm_mei_tx_copy_to_csme(struct iwl_mvm *mvm,
struct sk_buff *skb,
unsigned int ivlen)
{
if (mvm->mei_registered)
iwl_mei_tx_copy_to_csme(skb, ivlen);
}
static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm)
{
if (mvm->mei_registered)
iwl_mei_host_disassociated();
}
static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm)
{
if (mvm->mei_registered)
iwl_mei_device_down();
}
static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
{
bool sw_rfkill =
mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false;
if (mvm->mei_registered)
iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm),
sw_rfkill);
}
void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool forbidden);
#endif /* __IWL_MVM_H__ */


@ -47,8 +47,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
u32 capa_flags = mvm->fw->ucode_capa.flags;
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
PROT_OFFLOAD_CONFIG_CMD, 0);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 0);
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);


@ -8,6 +8,9 @@
#define LINUXKPI_PARAM_PREFIX iwlwifi_mvm_
#endif
#include <linux/module.h>
#if defined(__linux__)
#include <linux/rtnetlink.h>
#endif
#include <linux/vmalloc.h>
#include <net/mac80211.h>
@ -26,9 +29,11 @@
#include "iwl-prph.h"
#include "rs.h"
#include "fw/api/scan.h"
#include "fw/api/rfi.h"
#include "time-event.h"
#include "fw-api.h"
#include "fw/acpi.h"
#include "fw/uefi.h"
#if defined(__linux__)
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
@ -38,6 +43,7 @@ MODULE_LICENSE("GPL");
MODULE_LICENSE("BSD");
#endif
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_IMPORT_NS(IWLWIFI);
static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
@ -94,7 +100,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
u32 reg_val = 0;
u32 reg_val;
u32 phy_config = iwl_mvm_get_phy_config(mvm);
radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
@ -105,10 +111,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
FW_PHY_CFG_RADIO_DASH_POS;
/* SKU control */
reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev);
/* radio configuration */
reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
@ -133,8 +136,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;
iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH |
CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
@ -253,7 +255,8 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
*/
mvm->fw_static_smps_request =
req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
ieee80211_iterate_interfaces(mvm->hw,
IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
iwl_mvm_intf_dual_chain_req, NULL);
}
@ -276,6 +279,7 @@ enum iwl_rx_handler_context {
/**
* struct iwl_rx_handlers handler for FW notification
* @cmd_id: command id
* @min_size: minimum size to expect for the notification
* @context: see &iwl_rx_handler_context
* @fn: the function is called when notification is received
*/
@ -350,9 +354,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
struct iwl_umac_scan_iter_complete_notif),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
RX_HANDLER_SYNC, struct iwl_card_state_notif),
RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
RX_HANDLER_SYNC, struct iwl_missed_beacons_notif),
@ -402,6 +403,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
iwl_mvm_channel_switch_start_notif,
RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif),
RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
iwl_mvm_channel_switch_error_notif,
RX_HANDLER_ASYNC_UNLOCKED,
struct iwl_channel_switch_error_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
@ -410,6 +415,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_thermal_dual_chain_req,
RX_HANDLER_ASYNC_LOCKED,
struct iwl_thermal_dual_chain_request),
RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF,
iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED,
struct iwl_rfi_deactivate_notif),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@ -463,7 +472,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(POWER_TABLE_CMD),
HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
HCMD_NAME(DC2DC_CONFIG_CMD),
HCMD_NAME(NVM_ACCESS_CMD),
HCMD_NAME(BEACON_NOTIFICATION),
HCMD_NAME(BEACON_TEMPLATE_CMD),
@ -473,7 +481,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(STATISTICS_NOTIFICATION),
HCMD_NAME(EOSP_NOTIFICATION),
HCMD_NAME(REDUCE_TX_POWER_CMD),
HCMD_NAME(CARD_STATE_NOTIFICATION),
HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
HCMD_NAME(TDLS_CONFIG_CMD),
HCMD_NAME(MAC_PM_POWER_TABLE),
@ -518,6 +525,10 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(SHARED_MEM_CFG_CMD),
HCMD_NAME(INIT_EXTENDED_CFG_CMD),
HCMD_NAME(FW_ERROR_RECOVERY_CMD),
HCMD_NAME(RFI_CONFIG_CMD),
HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
HCMD_NAME(RFI_DEACTIVATE_NOTIF),
};
/* Please keep this array *SORTED* by hex value.
@ -550,9 +561,11 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
HCMD_NAME(STA_HE_CTXT_CMD),
HCMD_NAME(RLC_CONFIG_CMD),
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(STA_PM_NOTIF),
@ -651,13 +664,11 @@ static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
mutex_unlock(&mvm->mutex);
}
static int iwl_mvm_fwrt_dump_start(void *ctx)
static void iwl_mvm_fwrt_dump_start(void *ctx)
{
struct iwl_mvm *mvm = ctx;
mutex_lock(&mvm->mutex);
return 0;
}
static void iwl_mvm_fwrt_dump_end(void *ctx)
@ -699,13 +710,45 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
int ret;
if (trans->csme_own) {
if (WARN(!mvm->mei_registered,
"csme is owner, but we aren't registered to iwlmei\n"))
goto get_nvm_from_fw;
mvm->mei_nvm_data = iwl_mei_get_nvm();
if (mvm->mei_nvm_data) {
/*
* mvm->mei_nvm_data is set and because of that,
			 * we'll load the NVM from the FW when we get
* ownership.
*/
mvm->nvm_data =
iwl_parse_mei_nvm_data(trans, trans->cfg,
mvm->mei_nvm_data, mvm->fw);
return 0;
}
IWL_ERR(mvm,
"Got a NULL NVM from CSME, trying to get it from the device\n");
}
get_nvm_from_fw:
rtnl_lock();
wiphy_lock(mvm->hw->wiphy);
mutex_lock(&mvm->mutex);
ret = iwl_run_init_mvm_ucode(mvm);
ret = iwl_trans_start_hw(mvm->trans);
if (ret) {
mutex_unlock(&mvm->mutex);
wiphy_unlock(mvm->hw->wiphy);
rtnl_unlock();
return ret;
}
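	/*
	 * Lock ordering here is rtnl -> wiphy mutex -> mvm->mutex; both the
	 * error path above and the normal path further down release them in
	 * the reverse order.
	 */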
ret = iwl_run_init_mvm_ucode(mvm);
if (ret && ret != -ERFKILL)
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
if (!ret && iwl_mvm_is_lar_supported(mvm)) {
@ -717,9 +760,10 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
iwl_mvm_stop_device(mvm);
mutex_unlock(&mvm->mutex);
wiphy_unlock(mvm->hw->wiphy);
rtnl_unlock();
if (ret < 0)
if (ret)
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
return ret;
@ -727,6 +771,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
{
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
int ret;
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
@ -734,10 +779,17 @@ static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
ret = iwl_mvm_mac_setup_register(mvm);
if (ret)
return ret;
mvm->hw_registered = true;
iwl_mvm_dbgfs_register(mvm);
wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
mvm->mei_rfkill_blocked,
RFKILL_HARD_BLOCK_NOT_OWNER);
iwl_mvm_mei_set_sw_rfkill_state(mvm);
return 0;
}
@ -918,6 +970,109 @@ static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = {
.frob_mem = iwl_mvm_frob_mem,
};
static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info)
{
struct iwl_mvm *mvm = priv;
struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info;
/*
* This is protected by the guarantee that this function will not be
* called twice on two different threads
*/
prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true);
curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL);
if (!curr_conn_info)
return;
curr_conn_info->conn_info = *conn_info;
rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info);
if (prev_conn_info)
kfree_rcu(prev_conn_info, rcu_head);
}
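/*
 * The above is the usual RCU publish/retire sequence: the new buffer is made
 * visible with rcu_assign_pointer() and the previous one is reclaimed with
 * kfree_rcu() only after a grace period, so a reader inside an RCU critical
 * section can never see its conn_info freed underneath it.
 */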
static void iwl_mvm_mei_rfkill(void *priv, bool blocked)
{
struct iwl_mvm *mvm = priv;
mvm->mei_rfkill_blocked = blocked;
if (!mvm->hw_registered)
return;
wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
mvm->mei_rfkill_blocked,
RFKILL_HARD_BLOCK_NOT_OWNER);
}
static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden)
{
struct iwl_mvm *mvm = priv;
if (!mvm->hw_registered || !mvm->csme_vif)
return;
iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden);
}
static void iwl_mvm_sap_connected_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm =
container_of(wk, struct iwl_mvm, sap_connected_wk);
int ret;
ret = iwl_mvm_start_get_nvm(mvm);
if (ret)
goto out_free;
ret = iwl_mvm_start_post_nvm(mvm);
if (ret)
goto out_free;
return;
out_free:
IWL_ERR(mvm, "Couldn't get started...\n");
iwl_mei_start_unregister();
iwl_mei_unregister_complete();
iwl_fw_flush_dumps(&mvm->fwrt);
iwl_mvm_thermal_exit(mvm);
iwl_fw_runtime_free(&mvm->fwrt);
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
iwl_trans_op_mode_leave(mvm->trans);
kfree(mvm->nvm_data);
kfree(mvm->mei_nvm_data);
ieee80211_free_hw(mvm->hw);
}
static void iwl_mvm_mei_sap_connected(void *priv)
{
struct iwl_mvm *mvm = priv;
if (!mvm->hw_registered)
schedule_work(&mvm->sap_connected_wk);
}
static void iwl_mvm_mei_nic_stolen(void *priv)
{
struct iwl_mvm *mvm = priv;
rtnl_lock();
cfg80211_shutdown_all_interfaces(mvm->hw->wiphy);
rtnl_unlock();
}
static const struct iwl_mei_ops mei_ops = {
.me_conn_status = iwl_mvm_me_conn_status,
.rfkill = iwl_mvm_mei_rfkill,
.roaming_forbidden = iwl_mvm_mei_roaming_forbidden,
.sap_connected = iwl_mvm_mei_sap_connected,
.nic_stolen = iwl_mvm_mei_nic_stolen,
};
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@ -929,9 +1084,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
static const u8 no_reclaim_cmds[] = {
TX_CMD,
};
int err, scan_size;
int scan_size;
u32 min_backoff;
enum iwl_amsdu_size rb_size_default;
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
/*
* We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station
@ -950,12 +1105,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!hw)
return NULL;
hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
if (cfg->max_tx_agg_size)
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
else
hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
op_mode = hw->priv;
@ -970,6 +1125,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
&iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
iwl_mvm_get_acpi_tables(mvm);
iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
mvm->init_status = 0;
@ -1031,6 +1187,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk);
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
@ -1072,14 +1229,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
rb_size_default = IWL_AMSDU_2K;
else
rb_size_default = IWL_AMSDU_4K;
switch (iwlwifi_mod_params.amsdu_size) {
case IWL_AMSDU_DEF:
trans_cfg.rx_buf_size = rb_size_default;
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
break;
case IWL_AMSDU_4K:
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
@ -1093,7 +1245,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
default:
pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
iwlwifi_mod_params.amsdu_size);
trans_cfg.rx_buf_size = rb_size_default;
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
}
trans->wide_cmd_header = true;
@ -1121,6 +1273,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
trans_cfg.queue_alloc_cmd_ver =
iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(DATA_PATH_GROUP,
SCD_QUEUE_CONFIG_CMD),
0);
mvm->sta_remove_requires_queue_remove =
trans_cfg.queue_alloc_cmd_ver > 0;
/* Configure transport layer */
iwl_trans_configure(mvm->trans, &trans_cfg);
@ -1153,10 +1313,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_DEBUG_EEPROM(mvm->trans->dev,
"working without external nvm file\n");
err = iwl_trans_start_hw(mvm->trans);
if (err)
goto out_free;
scan_size = iwl_mvm_scan_size(mvm);
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
@ -1181,8 +1337,20 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->debugfs_dir = dbgfs_dir;
if (iwl_mvm_start_get_nvm(mvm))
mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops);
if (iwl_mvm_start_get_nvm(mvm)) {
/*
		 * Getting the NVM failed while CSME is the owner, but we are
		 * registered to MEI; we'll get the NVM later, once it becomes
		 * possible to get it from CSME.
*/
if (trans->csme_own && mvm->mei_registered)
return op_mode;
goto out_thermal_exit;
}
if (iwl_mvm_start_post_nvm(mvm))
goto out_thermal_exit;
@ -1191,6 +1359,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
out_thermal_exit:
iwl_mvm_thermal_exit(mvm);
if (mvm->mei_registered) {
iwl_mei_start_unregister();
iwl_mei_unregister_complete();
}
out_free:
iwl_fw_flush_dumps(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
@ -1217,6 +1389,7 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm)
iwl_trans_stop_device(mvm->trans);
iwl_free_fw_paging(&mvm->fwrt);
iwl_fw_dump_conf_clear(&mvm->fwrt);
iwl_mvm_mei_device_down(mvm);
}
static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
@ -1224,11 +1397,33 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
int i;
if (mvm->mei_registered) {
rtnl_lock();
iwl_mei_set_netdev(NULL);
rtnl_unlock();
iwl_mei_start_unregister();
}
/*
* After we unregister from mei, the worker can't be scheduled
* anymore.
*/
cancel_work_sync(&mvm->sap_connected_wk);
iwl_mvm_leds_exit(mvm);
iwl_mvm_thermal_exit(mvm);
ieee80211_unregister_hw(mvm->hw);
/*
* If we couldn't get ownership on the device and we couldn't
* get the NVM from CSME, we haven't registered to mac80211.
* In that case, we didn't fail op_mode_start, because we are
* waiting for CSME to allow us to get the NVM to register to
* mac80211. If that didn't happen, we haven't registered to
* mac80211, hence the if below.
*/
if (mvm->hw_registered)
ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
kfree(mvm->mcast_filter_cmd);
@ -1243,6 +1438,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
mvm->phy_db = NULL;
kfree(mvm->nvm_data);
kfree(mvm->mei_nvm_data);
kfree(rcu_access_pointer(mvm->csme_conn_info));
kfree(mvm->temp_nvm_data);
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
@ -1251,6 +1449,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_fw_runtime_free(&mvm->fwrt);
mutex_destroy(&mvm->mutex);
if (mvm->mei_registered)
iwl_mei_unregister_complete();
ieee80211_free_hw(mvm->hw);
}
@ -1533,6 +1734,12 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
iwl_mvm_set_rfkill_state(mvm);
}
struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm)
{
return rcu_dereference_protected(mvm->csme_conn_info,
lockdep_is_held(&mvm->mutex));
}
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@ -1616,6 +1823,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
*/
if (!mvm->fw_restart && fw_error) {
iwl_fw_error_collect(&mvm->fwrt, false);
} else if (test_bit(IWL_MVM_STATUS_STARTING,
&mvm->status)) {
IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
@ -1668,9 +1878,16 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
iwl_fw_error_collect(&mvm->fwrt, false);
if (fw_error && mvm->fw_restart > 0)
if (fw_error && mvm->fw_restart > 0) {
mvm->fw_restart--;
ieee80211_restart_hw(mvm->hw);
ieee80211_restart_hw(mvm->hw);
} else if (mvm->fwrt.trans->dbg.restart_required) {
IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
mvm->fwrt.trans->dbg.restart_required = FALSE;
ieee80211_restart_hw(mvm->hw);
} else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
ieee80211_restart_hw(mvm->hw);
}
}
}
@ -1678,7 +1895,9 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status))
if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
!test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
&mvm->status))
iwl_mvm_dump_nic_error_log(mvm);
if (sync) {
@ -1699,7 +1918,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
return;
iwl_mvm_nic_restart(mvm, true);
iwl_mvm_nic_restart(mvm, false);
}
static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
@ -1748,6 +1967,9 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
if (unlikely(queue >= mvm->trans->num_rx_queues))
return;
if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@ -157,8 +157,41 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
/* Set the channel info data */
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
/* we only support RLC command version 2 */
if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
chains_static, chains_dynamic);
}
static int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
u8 chains_static, u8 chains_dynamic)
{
struct iwl_rlc_config_cmd cmd = {
.phy_id = cpu_to_le32(ctxt->id),
};
if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
return 0;
BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_DRIVER_FORCE !=
PHY_RX_CHAIN_DRIVER_FORCE_MSK);
BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_VALID !=
PHY_RX_CHAIN_VALID_MSK);
BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_FORCE !=
PHY_RX_CHAIN_FORCE_SEL_MSK);
BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_FORCE_MIMO !=
PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK);
BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_COUNT != PHY_RX_CHAIN_CNT_MSK);
BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_MIMO_COUNT !=
PHY_RX_CHAIN_MIMO_CNT_MSK);
iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd.rlc.rx_chain_info,
chains_static, chains_dynamic);
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(RLC_CONFIG_CMD,
DATA_PATH_GROUP, 2),
0, sizeof(cmd), &cmd);
}
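/*
 * The BUILD_BUG_ONs above pin the IWL_RLC_CHAIN_INFO_* masks to the legacy
 * PHY_RX_CHAIN_* encoding bit-for-bit, which is what allows reusing
 * iwl_mvm_phy_ctxt_set_rxchain() to fill rlc.rx_chain_info directly.
 */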
/*
@ -174,10 +207,9 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
u32 action)
{
int ret;
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
PHY_CONTEXT_CMD, 1);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1);
if (ver == 3) {
if (ver == 3 || ver == 4) {
struct iwl_phy_context_cmd cmd = {};
/* Set the command header fields */
@ -211,9 +243,16 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
}
if (ret)
if (ret) {
IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
return ret;
return ret;
}
if (action != FW_CTXT_ACTION_REMOVE)
return iwl_mvm_phy_send_rlc(mvm, ctxt, chains_static,
chains_dynamic);
return 0;
}
/*
@ -228,6 +267,8 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
lockdep_assert_held(&mvm->mutex);
ctxt->channel = chandef->chan;
ctxt->width = chandef->width;
ctxt->center_freq1 = chandef->center_freq1;
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
@ -257,6 +298,13 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
lockdep_assert_held(&mvm->mutex);
if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) >= 2 &&
ctxt->channel == chandef->chan &&
ctxt->width == chandef->width &&
ctxt->center_freq1 == chandef->center_freq1)
return iwl_mvm_phy_send_rlc(mvm, ctxt, chains_static,
chains_dynamic);
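	/*
	 * Same channel, width and center frequency as before: only the chain
	 * configuration can have changed, so the lighter RLC_CONFIG_CMD is
	 * enough and the full PHY_CONTEXT_CMD is skipped.
	 */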
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
ctxt->channel->band != chandef->chan->band) {
@ -275,6 +323,8 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->channel = chandef->chan;
ctxt->width = chandef->width;
ctxt->center_freq1 = chandef->center_freq1;
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
action);
@ -295,19 +345,32 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
* otherwise we might not be able to reuse this phy.
*/
if (ctxt->ref == 0) {
struct ieee80211_channel *chan;
struct ieee80211_channel *chan = NULL;
struct cfg80211_chan_def chandef;
struct ieee80211_supported_band *sband = NULL;
enum nl80211_band band = NL80211_BAND_2GHZ;
struct ieee80211_supported_band *sband;
enum nl80211_band band;
int channel;
while (!sband && band < NUM_NL80211_BANDS)
sband = mvm->hw->wiphy->bands[band++];
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
sband = mvm->hw->wiphy->bands[band];
if (WARN_ON(!sband))
if (!sband)
continue;
for (channel = 0; channel < sband->n_channels; channel++)
if (!(sband->channels[channel].flags &
IEEE80211_CHAN_DISABLED)) {
chan = &sband->channels[channel];
break;
}
if (chan)
break;
}
if (WARN_ON(!chan))
return;
chan = &sband->channels[0];
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
}


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018 Intel Corporation
* Copyright (C) 2012-2014, 2018, 2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/

View File

@ -7,39 +7,57 @@
#include "fw/api/commands.h"
#include "fw/api/phy-ctxt.h"
/**
/*
* DDR needs frequency in units of 16.666MHz, so provide FW with the
* frequency values in the adjusted format.
*/
static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = {
/* LPDDR4 */
/* frequency 2667MHz */
{cpu_to_le16(160), {50, 58, 60, 62, 64, 52, 54, 56},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
/* frequency 2933MHz */
{cpu_to_le16(176), {149, 151, 153, 157, 159, 161, 165, 163, 167, 169,
171, 173, 175},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
/* frequency 3200MHz */
{cpu_to_le16(192), {79, 81, 83, 85, 87, 89, 91, 93},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}},
/* frequency 3733MHz */
{cpu_to_le16(223), {114, 116, 118, 120, 122,},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
{cpu_to_le16(223), {114, 116, 118, 120, 122, 106, 110, 124, 126},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
/* frequency 4000MHz */
{cpu_to_le16(240), {114, 151, 155, 157, 159, 161, 165},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5,}},
/* frequency 4267MHz */
{cpu_to_le16(256), {79, 83, 85, 87, 89, 91, 93,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6,}},
/* DDR5ePOR */
/* frequency 4000MHz */
{cpu_to_le16(240), {3, 5, 7, 9, 11, 13, 15,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6,}},
/* frequency 4400MHz */
{cpu_to_le16(264), {111, 119, 123, 125, 129, 131, 133, 135, 143,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}},
/* LPDDR5iPOR */
/* frequency 5200MHz */
{cpu_to_le16(312), {36, 38, 40, 42, 50,},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
{cpu_to_le16(312), {36, 38, 40, 42, 44, 46, 50,},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5,}},
/* frequency 5600MHz */
{cpu_to_le16(336), {106, 110, 112, 114, 116, 118, 120, 122},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
/* frequency 6000MHz */
{cpu_to_le16(360), {3, 5, 7, 9, 11, 13, 15,},
@ -107,12 +125,19 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
return ERR_PTR(-EIO);
resp = kzalloc(resp_size, GFP_KERNEL);
resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
if (!resp)
return ERR_PTR(-ENOMEM);
memcpy(resp, cmd.resp_pkt->data, resp_size);
iwl_free_resp(&cmd);
return resp;
}
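The hunk above collapses kzalloc()+memcpy() into a single kmemdup() call. For reference, kmemdup() semantics reduce to the following userspace equivalent:

#include <stdlib.h>
#include <string.h>

/* allocate-and-copy in one step, as kmemdup() does in the kernel */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;	/* caller checks for NULL, same as with kmemdup() */
}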
void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rfi_deactivate_notif *notif = (void *)pkt->data;
IWL_INFO(mvm, "RFIm is deactivated, reason = %d\n", notif->reason);
}

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2021 Intel Corporation
* Copyright (C) 2018-2022 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@ -97,7 +97,10 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
if (he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK &&
sband->iftype_data &&
sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK))
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
return flags;
@ -129,7 +132,7 @@ int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap,
static void
rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
const struct ieee80211_sta_vht_cap *vht_cap,
struct iwl_tlc_config_cmd *cmd)
struct iwl_tlc_config_cmd_v4 *cmd)
{
u16 supp;
int i, highest_mcs;
@ -154,7 +157,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(supp);
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
/*
* Check if VHT extended NSS indicates that the bandwidth/NSS
* configuration is supported - only for MCS 0 since we already
@ -164,8 +167,8 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
ieee80211_get_vht_max_nss(&ieee_vht_cap,
IEEE80211_VHT_CHANWIDTH_160MHZ,
0, true, nss) >= nss)
cmd->ht_rates[i][IWL_TLC_HT_BW_160] =
cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160];
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80];
}
}
@ -189,7 +192,7 @@ static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
static void
rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd *cmd)
struct iwl_tlc_config_cmd_v4 *cmd)
{
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
@ -219,7 +222,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
}
if (_mcs_80 > _tx_mcs_80)
_mcs_80 = _tx_mcs_80;
cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] =
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
/* If one side doesn't support - mark both as not supporting */
@ -230,14 +233,14 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
}
if (_mcs_160 > _tx_mcs_160)
_mcs_160 = _tx_mcs_160;
cmd->ht_rates[i][IWL_TLC_HT_BW_160] =
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
}
}
static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd *cmd)
struct iwl_tlc_config_cmd_v4 *cmd)
{
int i;
u16 supp = 0;
@ -263,15 +266,15 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
} else if (ht_cap->ht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_HT;
cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] =
cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
cpu_to_le16(ht_cap->mcs.rx_mask[0]);
/* the station support only a single receive chain */
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
0;
else
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
cpu_to_le16(ht_cap->mcs.rx_mask[1]);
}
}
@ -291,8 +294,12 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
notif = (void *)pkt->data;
sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
if (IS_ERR_OR_NULL(sta)) {
IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
notif->sta_id);
/* can happen in the remove-station flow, where mvm removed the
* station internally before removing it from the FW
*/
IWL_DEBUG_RATE(mvm,
"Invalid mvm RCU pointer for sta id (%d) in TLC notification\n",
notif->sta_id);
goto out;
}
@ -311,18 +318,19 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
char pretty_rate[100];
if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
TLC_MNG_UPDATE_NOTIF, 0) < 3) {
rs_pretty_print_rate_v1(pretty_rate, sizeof(pretty_rate),
le32_to_cpu(notif->rate));
IWL_DEBUG_RATE(mvm,
"Got rate in old format. Rate: %s. Converting.\n",
pretty_rate);
lq_sta->last_rate_n_flags =
iwl_new_rate_from_v1(le32_to_cpu(notif->rate));
} else {
lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
}
if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
TLC_MNG_UPDATE_NOTIF, 0) < 3) {
rs_pretty_print_rate_v1(pretty_rate,
sizeof(pretty_rate),
le32_to_cpu(notif->rate));
IWL_DEBUG_RATE(mvm,
"Got rate in old format. Rate: %s. Converting.\n",
pretty_rate);
lq_sta->last_rate_n_flags =
iwl_new_rate_from_v1(le32_to_cpu(notif->rate));
} else {
lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
}
rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
lq_sta->last_rate_n_flags);
IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate);
@ -415,26 +423,21 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct ieee80211_hw *hw = mvm->hw;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD);
struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
struct iwl_tlc_config_cmd cfg_cmd = {
struct iwl_tlc_config_cmd_v4 cfg_cmd = {
.sta_id = mvmsta->sta_id,
.max_ch_width = update ?
rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
.flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)),
.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
.max_mpdu_len = cpu_to_le16(max_amsdu_len),
.amsdu = iwl_mvm_is_csum_supported(mvm),
.max_mpdu_len = iwl_mvm_is_csum_supported(mvm) ?
cpu_to_le16(max_amsdu_len) : 0,
};
int ret;
u16 cmd_size = sizeof(cfg_cmd);
/* In old versions of the API the struct is 4 bytes smaller */
if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
TLC_MNG_CONFIG_CMD, 0) < 3)
cmd_size -= 4;
int cmd_ver;
memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
@ -449,8 +452,56 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
sta->max_amsdu_len = max_amsdu_len;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
&cfg_cmd);
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(DATA_PATH_GROUP,
TLC_MNG_CONFIG_CMD),
0);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n",
cfg_cmd.chains, cfg_cmd.sgi_ch_width_supp, cfg_cmd.flags);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, mpdu_len=%d, no_ht_rate=0x%X, tx_op=%d\n",
cfg_cmd.max_mpdu_len, cfg_cmd.non_ht_rates, cfg_cmd.max_tx_op);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][0]=0x%X, ht_rate[1][0]=0x%X\n",
cfg_cmd.ht_rates[0][0], cfg_cmd.ht_rates[1][0]);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][1]=0x%X, ht_rate[1][1]=0x%X\n",
cfg_cmd.ht_rates[0][1], cfg_cmd.ht_rates[1][1]);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][2]=0x%X, ht_rate[1][2]=0x%X\n",
cfg_cmd.ht_rates[0][2], cfg_cmd.ht_rates[1][2]);
if (cmd_ver == 4) {
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC,
sizeof(cfg_cmd), &cfg_cmd);
} else if (cmd_ver < 4) {
struct iwl_tlc_config_cmd_v3 cfg_cmd_v3 = {
.sta_id = cfg_cmd.sta_id,
.max_ch_width = cfg_cmd.max_ch_width,
.mode = cfg_cmd.mode,
.chains = cfg_cmd.chains,
.amsdu = !!cfg_cmd.max_mpdu_len,
.flags = cfg_cmd.flags,
.non_ht_rates = cfg_cmd.non_ht_rates,
.ht_rates[0][0] = cfg_cmd.ht_rates[0][0],
.ht_rates[0][1] = cfg_cmd.ht_rates[0][1],
.ht_rates[1][0] = cfg_cmd.ht_rates[1][0],
.ht_rates[1][1] = cfg_cmd.ht_rates[1][1],
.sgi_ch_width_supp = cfg_cmd.sgi_ch_width_supp,
.max_mpdu_len = cfg_cmd.max_mpdu_len,
};
u16 cmd_size = sizeof(cfg_cmd_v3);
/* In old versions of the API the struct is 4 bytes smaller */
if (iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(DATA_PATH_GROUP,
TLC_MNG_CONFIG_CMD), 0) < 3)
cmd_size -= 4;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
&cfg_cmd_v3);
} else {
ret = -EINVAL;
}
if (ret)
IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
}
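rs_fw_rate_init() now builds the v4 TLC config unconditionally and downgrades it to the v3 wire format for older firmware, trimming the final 4 bytes when the command version is below 3. A sketch of that downgrade pattern, using hypothetical reduced layouts (field names and sizes are illustrative only):

#include <stdint.h>
#include <string.h>

struct tlc_cmd_v4 { uint8_t sta_id, mode; uint16_t max_mpdu_len; uint32_t extra; };
struct tlc_cmd_v3 { uint8_t sta_id, mode; uint16_t max_mpdu_len; uint32_t tail; };

/* returns the number of bytes to send from 'out' */
static size_t build_tlc_cmd(int cmd_ver, const struct tlc_cmd_v4 *v4, void *out)
{
	if (cmd_ver == 4) {		/* new firmware: send v4 as-is */
		memcpy(out, v4, sizeof(*v4));
		return sizeof(*v4);
	}

	/* old firmware: rebuild the command field by field as v3 */
	struct tlc_cmd_v3 v3 = {
		.sta_id = v4->sta_id,
		.mode = v4->mode,
		.max_mpdu_len = v4->max_mpdu_len,
	};
	size_t len = sizeof(v3);

	if (cmd_ver < 3)	/* the oldest API lacks the last 4 bytes */
		len -= 4;
	memcpy(out, &v3, len);
	return len;
}

Building the newest layout first and converting down keeps a single source of truth for the field values, at the cost of one extra copy on the legacy path.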

View File

@ -67,7 +67,7 @@ struct iwl_lq_sta {
} pers;
};
#define RS_DRV_DATA_PACK(_lq_c, f) (0) /* XXX TODO | ? */
#define RS_DRV_DATA_PACK(_c, _f) ((void *)(uintptr_t)(_c | (uintptr_t)(_f) << sizeof(_c))) /* XXX TODO | ? */
struct iwl_mvm_sta;

View File

@ -189,9 +189,14 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
default:
/* Expected in monitor (not having the keys) */
#if defined(__linux__)
if (!mvm->monitor_on)
IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
#elif defined(__FreeBSD__)
if (!mvm->monitor_on && net_ratelimit())
IWL_ERR(mvm, "%s: Unhandled alg: 0x%x\n",
__func__, rx_pkt_status);
#endif
}
return 0;
@ -528,40 +533,19 @@ struct iwl_mvm_stat_data {
u8 *beacon_average_energy;
};
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
struct iwl_mvm_stat_data_all_macs {
struct iwl_mvm *mvm;
__le32 flags;
struct iwl_statistics_ntfy_per_mac *per_mac_stats;
};
static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
{
struct iwl_mvm_stat_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
int sig = -data->beacon_filter_average_energy;
int last_event;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
int thold = vif->bss_conf.cqm_rssi_thold;
int hyst = vif->bss_conf.cqm_rssi_hyst;
u16 id = le32_to_cpu(data->mac_id);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u16 vif_id = mvmvif->id;
/* This doesn't need the MAC ID check since it's not taking the
* data copied into the "data" struct, but rather the data from
* the notification directly.
*/
mvmvif->beacon_stats.num_beacons =
le32_to_cpu(data->beacon_counter[vif_id]);
mvmvif->beacon_stats.avg_signal =
-data->beacon_average_energy[vif_id];
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
*/
if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
mvmvif->beacon_stats.accu_num_beacons +=
mvmvif->beacon_stats.num_beacons;
if (mvmvif->id != id)
return;
if (vif->type != NL80211_IFTYPE_STATION)
return;
int last_event;
if (sig == 0) {
IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
@ -619,6 +603,73 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
}
}
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_stat_data *data = _data;
int sig = -data->beacon_filter_average_energy;
u16 id = le32_to_cpu(data->mac_id);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u16 vif_id = mvmvif->id;
/* This doesn't need the MAC ID check since it's not taking the
* data copied into the "data" struct, but rather the data from
* the notification directly.
*/
mvmvif->beacon_stats.num_beacons =
le32_to_cpu(data->beacon_counter[vif_id]);
mvmvif->beacon_stats.avg_signal =
-data->beacon_average_energy[vif_id];
if (mvmvif->id != id)
return;
if (vif->type != NL80211_IFTYPE_STATION)
return;
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
*/
if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
mvmvif->beacon_stats.accu_num_beacons +=
mvmvif->beacon_stats.num_beacons;
iwl_mvm_update_vif_sig(vif, sig);
}
static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_stat_data_all_macs *data = _data;
struct iwl_statistics_ntfy_per_mac *mac_stats;
int sig;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u16 vif_id = mvmvif->id;
if (WARN_ONCE(vif_id >= MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
return;
if (vif->type != NL80211_IFTYPE_STATION)
return;
mac_stats = &data->per_mac_stats[vif_id];
mvmvif->beacon_stats.num_beacons =
le32_to_cpu(mac_stats->beacon_counter);
mvmvif->beacon_stats.avg_signal =
-le32_to_cpu(mac_stats->beacon_average_energy);
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
*/
if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
mvmvif->beacon_stats.accu_num_beacons +=
mvmvif->beacon_stats.num_beacons;
sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy);
iwl_mvm_update_vif_sig(vif, sig);
}
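Both iterators above preserve the same invariant: when the firmware is told to clear its counters, the last reported count is folded into an accumulator first, so totals never move backwards. The pattern in isolation:

#include <stdint.h>

#define STATS_FLG_CLEAR	0x1	/* stand-in for IWL_STATISTICS_REPLY_FLG_CLEAR */

struct beacon_stats { uint32_t num, accu; };

static void update_beacon_stats(struct beacon_stats *bs,
				uint32_t fw_count, uint32_t flags)
{
	bs->num = fw_count;
	/* fw restarts from zero after a clear; bank the last value */
	if (flags & STATS_FLG_CLEAR)
		bs->accu += bs->num;
}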
static inline void
iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
{
@ -685,47 +736,41 @@ iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le,
}
static void
iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
struct iwl_statistics_operational_ntfy *stats)
{
struct iwl_mvm_stat_data_all_macs data = {
.mvm = mvm,
.flags = stats->flags,
.per_mac_stats = stats->per_mac_stats,
};
ieee80211_iterate_active_interfaces(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator_all_macs,
&data);
}
static void
iwl_mvm_stats_ver_14(struct iwl_mvm *mvm,
struct iwl_statistics_operational_ntfy_ver_14 *stats)
{
struct iwl_mvm_stat_data data = {
.mvm = mvm,
};
u8 beacon_average_energy[MAC_INDEX_AUX];
u8 average_energy[IWL_MVM_STATION_COUNT_MAX];
struct iwl_statistics_operational_ntfy *stats;
int expected_size;
__le32 flags;
int i;
expected_size = sizeof(*stats);
if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size,
"received invalid statistics size (%d)!, expected_size: %d\n",
iwl_rx_packet_payload_len(pkt), expected_size))
return;
stats = (void *)&pkt->data;
if (WARN_ONCE(stats->hdr.type != FW_STATISTICS_OPERATIONAL ||
stats->hdr.version !=
iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, STATISTICS_CMD, 0),
"received unsupported hdr type %d, version %d\n",
stats->hdr.type, stats->hdr.version))
return;
flags = stats->flags;
mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time);
mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time);
mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf);
mvm->radio_stats.on_time_scan = le64_to_cpu(stats->on_time_scan);
iwl_mvm_rx_stats_check_trigger(mvm, pkt);
data.mac_id = stats->mac_id;
data.beacon_filter_average_energy =
le32_to_cpu(stats->beacon_filter_average_energy);
data.flags = flags;
data.beacon_counter = stats->beacon_counter;
for (i = 0; i < ARRAY_SIZE(beacon_average_energy); i++)
beacon_average_energy[i] =
le32_to_cpu(stats->beacon_average_energy[i]);
@ -736,9 +781,105 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator,
&data);
}
static bool iwl_mvm_verify_stats_len(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt,
u32 expected_size)
{
struct iwl_statistics_ntfy_hdr *hdr;
if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size,
"received invalid statistics size (%d)!, expected_size: %d\n",
iwl_rx_packet_payload_len(pkt), expected_size))
return false;
hdr = (void *)&pkt->data;
if (WARN_ONCE((hdr->type & IWL_STATISTICS_TYPE_MSK) != FW_STATISTICS_OPERATIONAL ||
hdr->version !=
iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, STATISTICS_NOTIFICATION, 0),
"received unsupported hdr type %d, version %d\n",
hdr->type, hdr->version))
return false;
if (WARN_ONCE(le16_to_cpu(hdr->size) != expected_size,
"received invalid statistics size in header (%d)!, expected_size: %d\n",
le16_to_cpu(hdr->size), expected_size))
return false;
return true;
}
static void
iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
u8 average_energy[IWL_MVM_STATION_COUNT_MAX];
__le32 air_time[MAC_INDEX_AUX];
__le32 rx_bytes[MAC_INDEX_AUX];
__le32 flags = 0;
int i;
u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
STATISTICS_NOTIFICATION, 0);
if (WARN_ONCE(notif_ver > 15,
"invalid statistics version id: %d\n", notif_ver))
return;
if (notif_ver == 14) {
struct iwl_statistics_operational_ntfy_ver_14 *stats =
(void *)pkt->data;
if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats)))
return;
iwl_mvm_stats_ver_14(mvm, stats);
flags = stats->flags;
mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time);
mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time);
mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf);
mvm->radio_stats.on_time_scan =
le64_to_cpu(stats->on_time_scan);
for (i = 0; i < ARRAY_SIZE(average_energy); i++)
average_energy[i] = le32_to_cpu(stats->average_energy[i]);
for (i = 0; i < ARRAY_SIZE(air_time); i++) {
air_time[i] = stats->air_time[i];
rx_bytes[i] = stats->rx_bytes[i];
}
}
if (notif_ver == 15) {
struct iwl_statistics_operational_ntfy *stats =
(void *)pkt->data;
if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats)))
return;
iwl_mvm_stats_ver_15(mvm, stats);
flags = stats->flags;
mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time);
mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time);
mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf);
mvm->radio_stats.on_time_scan =
le64_to_cpu(stats->on_time_scan);
for (i = 0; i < ARRAY_SIZE(average_energy); i++)
average_energy[i] =
le32_to_cpu(stats->per_sta_stats[i].average_energy);
for (i = 0; i < ARRAY_SIZE(air_time); i++) {
air_time[i] = stats->per_mac_stats[i].air_time;
rx_bytes[i] = stats->per_mac_stats[i].rx_bytes;
}
}
iwl_mvm_rx_stats_check_trigger(mvm, pkt);
for (i = 0; i < ARRAY_SIZE(average_energy); i++)
average_energy[i] = le32_to_cpu(stats->average_energy[i]);
ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
average_energy);
/*
@ -747,8 +888,7 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
* request and once in statistics notification.
*/
if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
iwl_mvm_update_tcm_from_stats(mvm, stats->air_time,
stats->rx_bytes);
iwl_mvm_update_tcm_from_stats(mvm, air_time, rx_bytes);
}
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
@ -762,8 +902,8 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
u8 *energy;
/* From ver 14 and up we use TLV statistics format */
if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
STATISTICS_CMD, 0) >= 14)
if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
STATISTICS_NOTIFICATION, 0) >= 14)
return iwl_mvm_handle_rx_statistics_tlv(mvm, pkt);
if (!iwl_mvm_has_new_rx_stats_api(mvm)) {

View File

@ -124,12 +124,39 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
unsigned int headlen, fraglen, pad_len = 0;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;
if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
len -= 2;
pad_len = 2;
}
/*
* For a non-monitor interface, strip the bytes RADA might not have
* removed. As a monitor interface cannot coexist with other
* interfaces, this removal is safe.
*/
if (mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) {
u32 pkt_flags = le32_to_cpu(pkt->len_n_flags);
/*
* If RADA was not enabled then decryption was not performed so
* the MIC cannot be removed.
*/
if (!(pkt_flags & FH_RSCSR_RADA_EN)) {
if (WARN_ON(crypt_len > mic_crc_len))
return -EINVAL;
mic_crc_len -= crypt_len;
}
if (WARN_ON(mic_crc_len > len))
return -EINVAL;
len -= mic_crc_len;
}
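The new trim logic above derives the MIC+CRC tail length from mac_flags1 (stored in 2-byte units, hence the shift) and, when RADA did not run, deducts the crypto header already counted in crypt_len before trimming the tail. A standalone sketch of that arithmetic; the mask and flag values are placeholders, not the real firmware layout:

#include <stdint.h>

#define MFLG1_MIC_CRC_LEN_MASK	0x0f		/* placeholder bitfield */
#define PKT_RADA_EN		(1u << 0)	/* placeholder flag */

/* returns bytes to strip from the tail, or -1 on a malformed frame */
static int tail_to_strip(uint8_t mac_flags1, uint32_t pkt_flags,
			 unsigned int crypt_len, unsigned int len)
{
	unsigned int mic_crc_len = (mac_flags1 & MFLG1_MIC_CRC_LEN_MASK) << 1;

	if (!mic_crc_len)
		return 0;
	if (!(pkt_flags & PKT_RADA_EN)) {
		/*
		 * Without RADA, decryption did not run, so the crypto
		 * header length already accounted for is deducted.
		 */
		if (crypt_len > mic_crc_len)
			return -1;
		mic_crc_len -= crypt_len;
	}
	return mic_crc_len > len ? -1 : (int)mic_crc_len;
}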
/* If frame is small enough to fit in skb->head, pull it completely.
* If not, only pull ieee80211_hdr (including crypto if present, and
* an additional 8 bytes for SNAP/ethertype, see below) so that
@ -152,18 +179,8 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
*/
hdrlen += crypt_len;
if (WARN_ONCE(headlen < hdrlen,
"invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
hdrlen, len, crypt_len)) {
/*
* We warn and trace because we want to be able to see
* it in trace-cmd as well.
*/
IWL_DEBUG_RX(mvm,
"invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
hdrlen, len, crypt_len);
if (unlikely(headlen < hdrlen))
return -EINVAL;
}
skb_put_data(skb, hdr, hdrlen);
skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
@ -175,8 +192,12 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
* in the cases the hardware didn't handle, since it's rare to see
* such packets, even though the hardware did calculate the checksum
* in this case, just starting after the MAC header instead.
*
* Starting from Bz hardware, it calculates starting directly after
* the MAC header, so that matches mac80211's expectation.
*/
if (skb->ip_summed == CHECKSUM_COMPLETE) {
if (skb->ip_summed == CHECKSUM_COMPLETE &&
mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) {
struct {
u8 hdr[6];
__be16 type;
@ -191,6 +212,9 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
shdr->type != htons(ETH_P_PAE) &&
shdr->type != htons(ETH_P_TDLS))))
skb->ip_summed = CHECKSUM_NONE;
else
/* mac80211 assumes full CSUM including SNAP header */
skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
}
fraglen = len - headlen;
@ -450,8 +474,12 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
if (!is_multicast_ether_addr(hdr->addr1) &&
!mvm->monitor_on && net_ratelimit())
#if defined(__linux__)
IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
#elif defined(__FreeBSD__)
IWL_ERR(mvm, "%s: Unhandled alg: 0x%x\n",
__func__, status);
#endif
}
return 0;
@ -506,8 +534,10 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
return false;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
#if defined(__FreeBSD__)
if (WARN_ON(mvm_sta->dup_data == NULL))
return false;
#endif
dup_data = &mvm_sta->dup_data[queue];
/*
@ -778,8 +808,11 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
if (WARN_ON_ONCE(!ba_data))
if (!ba_data) {
WARN(!(flags & IWL_MVM_RELEASE_FROM_RSS_SYNC),
"BAID %d not found in map\n", baid);
goto out;
}
sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
@ -1973,8 +2006,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
} else if (format == RATE_MCS_VHT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
rx_status->nss =
((rate_n_flags & RATE_MCS_NSS_MSK) >>
rx_status->nss = ((rate_n_flags & RATE_MCS_NSS_MSK) >>
RATE_MCS_NSS_POS) + 1;
rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
rx_status->encoding = RX_ENC_VHT;

View File

@ -20,7 +20,6 @@
#define IWL_SCAN_DWELL_FRAGMENTED 44
#define IWL_SCAN_DWELL_EXTENDED 90
#define IWL_SCAN_NUM_OF_FRAGS 3
#define IWL_SCAN_LAST_2_4_CHN 14
/* adaptive dwell max budget time [TU] for full scan */
#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
@ -98,6 +97,7 @@ struct iwl_mvm_scan_params {
u32 n_6ghz_params;
bool scan_6ghz;
bool enable_6ghz_passive;
bool respect_p2p_go, respect_p2p_go_hb;
};
static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
@ -169,17 +169,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
}
static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int *global_cnt = data;
if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
mvmvif->phy_ctxt->id < NUM_PHY_CTX)
*global_cnt += 1;
}
static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
{
return mvm->tcm.result.global_load;
@ -191,26 +180,31 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
return mvm->tcm.result.band_load[band];
}
struct iwl_is_dcm_with_go_iterator_data {
struct iwl_mvm_scan_iter_data {
u32 global_cnt;
struct ieee80211_vif *current_vif;
bool is_dcm_with_p2p_go;
};
static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_is_dcm_with_go_iterator_data *data = _data;
struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_vif *curr_mvmvif =
iwl_mvm_vif_from_mac80211(data->current_vif);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_scan_iter_data *data = _data;
struct iwl_mvm_vif *curr_mvmvif;
/* exclude the given vif */
if (vif == data->current_vif)
if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
mvmvif->phy_ctxt->id < NUM_PHY_CTX)
data->global_cnt += 1;
if (!data->current_vif || vif == data->current_vif)
return;
curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
data->is_dcm_with_p2p_go = true;
}
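The merged iwl_mvm_scan_iterator() above gathers, in one interface walk, both inputs the scan-type decision needs: how many vifs hold a PHY context, and whether another vif is a P2P GO on a different PHY context (DCM). A reduced sketch of the combined iterator:

#include <stdbool.h>

enum iftype { IFTYPE_STATION, IFTYPE_AP, IFTYPE_P2P_DEVICE };

struct vif { enum iftype type; bool p2p; int phy_id; /* -1 = none */ };

struct scan_iter_data {
	unsigned int global_cnt;
	const struct vif *current_vif;
	bool is_dcm_with_p2p_go;
};

static void scan_iter(struct scan_iter_data *d, const struct vif *vif)
{
	/* count every vif holding a valid PHY context */
	if (vif->type != IFTYPE_P2P_DEVICE && vif->phy_id >= 0)
		d->global_cnt++;

	if (!d->current_vif || vif == d->current_vif)
		return;

	/* a P2P GO on another PHY context means DCM operation */
	if (vif->type == IFTYPE_AP && vif->p2p &&
	    vif->phy_id >= 0 && d->current_vif->phy_id >= 0 &&
	    vif->phy_id != d->current_vif->phy_id)
		d->is_dcm_with_p2p_go = true;
}

Folding the two walks into one saves an interface iteration per scan request and keeps both facts computed under the same snapshot of the interface list.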
@ -220,13 +214,18 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
enum iwl_mvm_traffic_load load,
bool low_latency)
{
int global_cnt = 0;
struct iwl_mvm_scan_iter_data data = {
.current_vif = vif,
.is_dcm_with_p2p_go = false,
.global_cnt = 0,
};
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_condition_iterator,
&global_cnt);
if (!global_cnt)
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_iterator,
&data);
if (!data.global_cnt)
return IWL_SCAN_TYPE_UNASSOC;
if (fw_has_api(&mvm->fw->ucode_capa,
@ -235,23 +234,14 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
(!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
return IWL_SCAN_TYPE_FRAGMENTED;
/* in case of DCM with GO where BSS DTIM interval < 220msec
/*
* in case of DCM with GO where BSS DTIM interval < 220msec
* set all scan requests as fast-balance scan
* */
*/
if (vif && vif->type == NL80211_IFTYPE_STATION &&
vif->bss_conf.dtim_period < 220) {
struct iwl_is_dcm_with_go_iterator_data data = {
.current_vif = vif,
.is_dcm_with_p2p_go = false,
};
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_is_dcm_with_go_iterator,
&data);
if (data.is_dcm_with_p2p_go)
return IWL_SCAN_TYPE_FAST_BALANCE;
}
vif->bss_conf.dtim_period < 220 &&
data.is_dcm_with_p2p_go)
return IWL_SCAN_TYPE_FAST_BALANCE;
}
if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
@ -579,7 +569,9 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
profile->ssid_index = i;
/* Support any cipher and auth algorithm */
profile->unicast_cipher = 0xff;
profile->auth_alg = 0xff;
profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED |
IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK | IWL_AUTH_ALGO_8021X |
IWL_AUTH_ALGO_SAE | IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE;
profile->network_type = IWL_NETWORK_TYPE_ANY;
profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
@ -649,9 +641,7 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
NL80211_BAND_2GHZ,
no_cck);
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA,
0) < 12) {
if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
@ -1088,8 +1078,7 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
/* This function should not be called when using ADD_STA ver >=12 */
WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA, 0) >= 12);
WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12);
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@ -1140,8 +1129,7 @@ static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
/* This function should not be called when using ADD_STA ver >=12 */
WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA, 0) >= 12);
WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12);
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@ -1154,7 +1142,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
void *cfg;
int ret, cmd_size;
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
};
enum iwl_mvm_scan_type type;
enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
@ -1245,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
{
struct iwl_scan_config cfg;
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
.len[0] = sizeof(cfg),
.data[0] = &cfg,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
@ -1256,11 +1244,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
memset(&cfg, 0, sizeof(cfg));
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA, 0) < 12) {
if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
cfg.bcast_sta_id = mvm->aux_sta.sta_id;
} else if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
SCAN_CFG_CMD, 0) < 5) {
} else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) {
/*
* Fw doesn't use this sta anymore. Deprecated on SCAN_CFG_CMD
* version 5.
@ -1394,8 +1380,8 @@ static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params)
}
static void
iwl_mvm_scan_umac_dwell_v10(struct iwl_mvm *mvm,
struct iwl_scan_general_params_v10 *general_params,
iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
struct iwl_scan_general_params_v11 *general_params,
struct iwl_mvm_scan_params *params)
{
struct iwl_mvm_scan_timing_params *timing, *hb_timing;
@ -1660,7 +1646,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
}
}
static int
static void
iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct iwl_scan_probe_params_v4 *pp)
@ -1729,31 +1715,40 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
pp->short_ssid_num = idex_s;
pp->bssid_num = idex_b;
return 0;
}
/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */
static void
iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
static u32
iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
u32 n_channels,
struct iwl_scan_probe_params_v4 *pp,
struct iwl_scan_channel_params_v6 *cp,
enum nl80211_iftype vif_type)
{
struct iwl_scan_channel_cfg_umac *channel_cfg = cp->channel_config;
int i;
struct cfg80211_scan_6ghz_params *scan_6ghz_params =
params->scan_6ghz_params;
u32 ch_cnt;
for (i = 0; i < params->n_channels; i++) {
for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
struct iwl_scan_channel_cfg_umac *cfg =
&cp->channel_config[i];
&cp->channel_config[ch_cnt];
u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
bool force_passive, found = false, allow_passive = true,
unsolicited_probe_on_chan = false, psc_no_listen = false;
/*
* Avoid performing passive scan on non PSC channels unless the
* scan is specifically a passive scan, i.e., no SSIDs
* configured in the scan command.
*/
if (!cfg80211_channel_is_psc(params->channels[i]) &&
!params->n_6ghz_params && params->n_ssids)
continue;
cfg->v1.channel_num = params->channels[i]->hw_value;
cfg->v2.band = 2;
cfg->v2.iter_count = 1;
@ -1826,8 +1821,6 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
}
}
flags = bssid_bitmap | (s_ssid_bitmap << 16);
if (cfg80211_channel_is_psc(params->channels[i]) &&
psc_no_listen)
flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN;
@ -1869,11 +1862,22 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
(s_max > 1 || b_max > 3));
}
if ((allow_passive && force_passive) ||
(!flags && !cfg80211_channel_is_psc(params->channels[i])))
(!(bssid_bitmap | s_ssid_bitmap) &&
!cfg80211_channel_is_psc(params->channels[i])))
flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;
else
flags |= bssid_bitmap | (s_ssid_bitmap << 16);
channel_cfg[i].flags |= cpu_to_le32(flags);
cfg->flags |= cpu_to_le32(flags);
ch_cnt++;
}
if (params->n_channels > ch_cnt)
IWL_DEBUG_SCAN(mvm,
"6GHz: reducing number of channels: (%u->%u)\n",
params->n_channels, ch_cnt);
return ch_cnt;
}
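The 6GHz channel filler now skips non-PSC channels on active scans and compacts the surviving entries, returning the count so the caller can reject an empty configuration. In outline:

#include <stdbool.h>

struct chan6g { int hw_value; bool psc; };

/* write only the channels worth scanning; return how many survived */
static unsigned int cfg_6g_channels(const struct chan6g *in, unsigned int n_in,
				    unsigned int n_ssids,
				    unsigned int n_6ghz_params,
				    int *out_hw_values)
{
	unsigned int ch_cnt = 0;

	for (unsigned int i = 0; i < n_in; i++) {
		/*
		 * Active scans (SSIDs set, no 6GHz params) skip non-PSC
		 * channels rather than scanning them passively.
		 */
		if (!in[i].psc && !n_6ghz_params && n_ssids)
			continue;
		out_hw_values[ch_cnt++] = in[i].hw_value;
	}
	return ch_cnt;		/* 0 makes the caller fail with -EINVAL */
}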
static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
@ -1890,9 +1894,25 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
/* set fragmented ebs for fragmented scan on HB channels */
if (iwl_mvm_is_scan_fragmented(params->hb_type))
if ((!iwl_mvm_is_cdb_supported(mvm) &&
iwl_mvm_is_scan_fragmented(params->type)) ||
(iwl_mvm_is_cdb_supported(mvm) &&
iwl_mvm_is_scan_fragmented(params->hb_type)))
flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
/*
* force EBS in case the scan is fragmented and P2P GO operation needs
* to be taken into consideration during the scan.
*/
if ((!iwl_mvm_is_cdb_supported(mvm) &&
iwl_mvm_is_scan_fragmented(params->type) && params->respect_p2p_go) ||
(iwl_mvm_is_cdb_supported(mvm) &&
iwl_mvm_is_scan_fragmented(params->hb_type) &&
params->respect_p2p_go_hb)) {
IWL_DEBUG_SCAN(mvm, "Respect P2P GO. Force EBS\n");
flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
}
return flags;
}
@ -1924,22 +1944,19 @@ static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm,
}
/*
* 6GHz passive scan is allowed while associated in a defined time
* interval following HW reset or resume flow
* 6GHz passive scan is allowed in a defined time interval following HW
* reset or resume flow, or while not associated and a large interval
* has passed since the last 6GHz passive scan.
*/
if (vif->bss_conf.assoc &&
if ((vif->bss_conf.assoc ||
time_after(mvm->last_6ghz_passive_scan_jiffies +
(IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
(time_before(mvm->last_reset_or_resume_time_jiffies +
(IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
jiffies))) {
IWL_DEBUG_SCAN(mvm, "6GHz passive scan: associated\n");
return;
}
/* No need for 6GHz passive scan if not enough time elapsed */
if (time_after(mvm->last_6ghz_passive_scan_jiffies +
(IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) {
IWL_DEBUG_SCAN(mvm,
"6GHz passive scan: timeout did not expire\n");
IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n",
vif->bss_conf.assoc ? "associated" :
"timeout did not expire");
return;
}
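The merged condition above skips a 6GHz passive scan when the station is associated or has scanned recently, unless the device is still inside the grace window that follows a HW reset or resume. The same gate with plain integers in place of jiffies (timeout values are illustrative, not the driver's constants):

#include <stdbool.h>

#define PASSIVE_SCAN_TIMEOUT	(60 * 60)	/* illustrative */
#define POST_RESET_WINDOW	60		/* illustrative */

static bool allow_6ghz_passive(bool assoc, long now,
			       long last_passive_scan,
			       long last_reset_or_resume)
{
	bool recently_scanned = now < last_passive_scan + PASSIVE_SCAN_TIMEOUT;
	bool past_grace_window = now > last_reset_or_resume + POST_RESET_WINDOW;

	/* skip when associated or recently scanned, once the window closed */
	return !((assoc || recently_scanned) && past_grace_window);
}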
@ -2037,6 +2054,32 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
if (params->enable_6ghz_passive)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN;
if (iwl_mvm_is_oce_supported(mvm) &&
(params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP |
NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE |
NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE;
return flags;
}
static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif, int type)
{
u8 flags = 0;
if (iwl_mvm_is_cdb_supported(mvm)) {
if (params->respect_p2p_go)
flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB;
if (params->respect_p2p_go_hb)
flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
} else {
if (params->respect_p2p_go)
flags = IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
}
return flags;
}
@ -2158,8 +2201,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_umac_chan_param *chan_param;
void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
void *sec_part = (u8 *)cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
struct iwl_scan_req_umac_tail_v2 *tail_v2 =
(struct iwl_scan_req_umac_tail_v2 *)sec_part;
@ -2239,17 +2281,21 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
static void
iwl_mvm_scan_umac_fill_general_p_v10(struct iwl_mvm *mvm,
iwl_mvm_scan_umac_fill_general_p_v11(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif,
struct iwl_scan_general_params_v10 *gp,
u16 gen_flags)
struct iwl_scan_general_params_v11 *gp,
u16 gen_flags, u8 gen_flags2)
{
struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
iwl_mvm_scan_umac_dwell_v10(mvm, gp, params);
iwl_mvm_scan_umac_dwell_v11(mvm, gp, params);
IWL_DEBUG_SCAN(mvm, "General: flags=0x%x, flags2=0x%x\n",
gen_flags, gen_flags2);
gp->flags = cpu_to_le16(gen_flags);
gp->flags2 = gen_flags2;
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
@ -2351,9 +2397,9 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
&scan_p->general_params,
gen_flags);
gen_flags, 0);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
@ -2368,16 +2414,18 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
int uid)
static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params,
int type, int uid, u32 version)
{
struct iwl_scan_req_umac_v14 *cmd = mvm->scan_cmd;
struct iwl_scan_req_params_v14 *scan_p = &cmd->scan_params;
struct iwl_scan_req_umac_v15 *cmd = mvm->scan_cmd;
struct iwl_scan_req_params_v15 *scan_p = &cmd->scan_params;
struct iwl_scan_channel_params_v6 *cp = &scan_p->channel_params;
struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params;
int ret;
u16 gen_flags;
u8 gen_flags2;
u32 bitmap_ssid = 0;
mvm->scan_uid_status[uid] = type;
@ -2386,9 +2434,15 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
if (version >= 15)
gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type);
else
gen_flags2 = 0;
iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
&scan_p->general_params,
gen_flags);
gen_flags, gen_flags2);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
@ -2411,14 +2465,16 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
ret = iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
if (ret)
return ret;
iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
cp->count = iwl_mvm_umac_scan_cfg_channels_v6_6g(mvm, params,
params->n_channels,
pb, cp, vif->type);
if (!cp->count) {
mvm->scan_uid_status[uid] = 0;
return -EINVAL;
}
iwl_mvm_umac_scan_cfg_channels_v6_6g(params,
params->n_channels,
pb, cp, vif->type);
cp->count = params->n_channels;
if (!params->n_ssids ||
(params->n_ssids == 1 && !params->ssids[0].ssid_len))
cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
@ -2426,6 +2482,20 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
int uid)
{
return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 14);
}
static int iwl_mvm_scan_umac_v15(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
int uid)
{
return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 15);
}
static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
{
return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
@ -2499,7 +2569,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
return -EIO;
}
#define SCAN_TIMEOUT 20000
#define SCAN_TIMEOUT 30000
void iwl_mvm_scan_timeout_wk(struct work_struct *work)
{
@ -2541,6 +2611,7 @@ struct iwl_scan_umac_handler {
static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
/* set the newest version first to shorten the list traverse time */
IWL_SCAN_UMAC_HANDLER(15),
IWL_SCAN_UMAC_HANDLER(14),
IWL_SCAN_UMAC_HANDLER(12),
};
@ -2567,10 +2638,9 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (uid < 0)
return uid;
hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
hcmd->id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC);
scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC,
scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN);
for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
@ -2590,6 +2660,85 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
return uid;
}
struct iwl_mvm_scan_respect_p2p_go_iter_data {
struct ieee80211_vif *current_vif;
bool p2p_go;
enum nl80211_band band;
};
static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_scan_respect_p2p_go_iter_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
/* exclude the given vif */
if (vif == data->current_vif)
return;
if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
mvmvif->phy_ctxt->id < NUM_PHY_CTX &&
(data->band == NUM_NL80211_BANDS ||
mvmvif->phy_ctxt->channel->band == data->band))
data->p2p_go = true;
}
static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool low_latency,
enum nl80211_band band)
{
struct iwl_mvm_scan_respect_p2p_go_iter_data data = {
.current_vif = vif,
.p2p_go = false,
.band = band,
};
if (!low_latency)
return false;
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_respect_p2p_go_iter,
&data);
return data.p2p_go;
}
static bool iwl_mvm_get_respect_p2p_go_band(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
enum nl80211_band band)
{
bool low_latency = iwl_mvm_low_latency_band(mvm, band);
return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, band);
}
static bool iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
bool low_latency = iwl_mvm_low_latency(mvm);
return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency,
NUM_NL80211_BANDS);
}
static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
{
if (iwl_mvm_is_cdb_supported(mvm)) {
params->respect_p2p_go =
iwl_mvm_get_respect_p2p_go_band(mvm, vif,
NL80211_BAND_2GHZ);
params->respect_p2p_go_hb =
iwl_mvm_get_respect_p2p_go_band(mvm, vif,
NL80211_BAND_5GHZ);
} else {
params->respect_p2p_go = iwl_mvm_get_respect_p2p_go(mvm, vif);
}
}
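On CDB hardware each LMAC serves one band, so the respect-P2P-GO decision is taken per band; single-LMAC devices take one global decision, as the filler above does. A compact sketch, with stub predicates standing in for the low-latency and GO lookups:

#include <stdbool.h>

enum band { BAND_2GHZ, BAND_5GHZ, BAND_ANY };

/* stubs standing in for iwl_mvm_low_latency*() and the GO iterator */
static bool low_latency_on(enum band b) { (void)b; return true; }
static bool p2p_go_on(enum band b) { return b != BAND_5GHZ; }

static bool respect_p2p_go(enum band b)
{
	/* only worth respecting the GO when low latency is requested */
	return low_latency_on(b) && p2p_go_on(b);
}

static void fill_respect_p2p_go(bool cdb, bool *lb, bool *hb)
{
	if (cdb) {			/* per-band decision on CDB */
		*lb = respect_p2p_go(BAND_2GHZ);
		*hb = respect_p2p_go(BAND_5GHZ);
	} else {			/* one decision covers both LMACs */
		*lb = respect_p2p_go(BAND_ANY);
		*hb = false;
	}
}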
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@ -2641,6 +2790,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.scan_6ghz_params = req->scan_6ghz_params;
params.scan_6ghz = req->scan_6ghz;
iwl_mvm_fill_scan_type(mvm, &params, vif);
iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
if (req->duration)
params.iter_notif = true;
@ -2732,6 +2882,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.scan_plans = req->scan_plans;
iwl_mvm_fill_scan_type(mvm, &params, vif);
iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
/* In theory, LMAC scans can handle a 32-bit delay, but since
* waiting for over 18 hours to start the scan is a bit silly
@ -2901,8 +3052,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
ret = iwl_mvm_send_cmd_pdu(mvm,
iwl_cmd_id(SCAN_ABORT_UMAC,
IWL_ALWAYS_LONG_GROUP, 0),
WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
0, sizeof(cmd), &cmd);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
@ -2941,15 +3091,14 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
1 * HZ);
}
#define IWL_SCAN_REQ_UMAC_HANDLE_SIZE(_ver) { \
case (_ver): return sizeof(struct iwl_scan_req_umac_v##_ver); \
}
static int iwl_scan_req_umac_get_size(u8 scan_ver)
{
switch (scan_ver) {
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(14);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
case 12:
return sizeof(struct iwl_scan_req_umac_v12);
case 14:
case 15:
return sizeof(struct iwl_scan_req_umac_v15);
}
return 0;
@ -2958,8 +3107,7 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size, tail_size;
u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC,
u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN);
base_size = iwl_scan_req_umac_get_size(scan_ver);

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2015, 2018-2021 Intel Corporation
* Copyright (C) 2012-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -320,7 +320,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
}
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u16 *queueptr, u8 tid, u8 flags)
u16 *queueptr, u8 tid)
{
int queue = *queueptr;
struct iwl_scd_txq_cfg_cmd cmd = {
@ -329,11 +329,28 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
};
int ret;
lockdep_assert_held(&mvm->mutex);
if (iwl_mvm_has_new_tx_api(mvm)) {
if (mvm->sta_remove_requires_queue_remove) {
u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
SCD_QUEUE_CONFIG_CMD);
struct iwl_scd_queue_cfg_cmd remove_cmd = {
.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
.u.remove.queue = cpu_to_le32(queue),
};
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
sizeof(remove_cmd),
&remove_cmd);
} else {
ret = 0;
}
iwl_trans_txq_free(mvm->trans, queue);
*queueptr = IWL_MVM_INVALID_QUEUE;
return 0;
return ret;
}
if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
@ -377,7 +394,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mvm->queue_info[queue].reserved = false;
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
if (ret)
@ -516,7 +533,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
iwl_mvm_invalidate_sta_queue(mvm, queue,
disable_agg_tids, false);
ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid);
if (ret) {
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
@ -600,6 +617,39 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
return queue;
}
/* Re-configure the SCD for a queue that has already been configured */
static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
int sta_id, int tid, int frame_limit, u16 ssn)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_ENABLE_QUEUE,
.window = frame_limit,
.sta_id = sta_id,
.ssn = cpu_to_le16(ssn),
.tx_fifo = fifo,
.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
.tid = tid,
};
int ret;
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
"Trying to reconfig unallocated queue %d\n", queue))
return -ENXIO;
IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
queue, fifo, ret);
return ret;
}
/*
* If a given queue has a higher AC than the TID stream that is being compared
* to, the queue needs to be redirected to the lower AC. This function does that
@ -720,21 +770,40 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
u8 sta_id, u8 tid, unsigned int timeout)
{
int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
mvm->trans->cfg->min_256_ba_txq_size);
int queue, size;
if (tid == IWL_MAX_TID_COUNT) {
tid = IWL_MGMT_TID;
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
mvm->trans->cfg->min_txq_size);
} else {
struct ieee80211_sta *sta;
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
/* this queue isn't used for traffic (cab_queue) */
if (IS_ERR_OR_NULL(sta)) {
size = IWL_MGMT_QUEUE_SIZE;
} else if (sta->he_cap.has_he) {
/* support for 256 ba size */
size = IWL_DEFAULT_QUEUE_SIZE_HE;
} else {
size = IWL_DEFAULT_QUEUE_SIZE;
}
rcu_read_unlock();
}
do {
__le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
/* take the min with bc tbl entries allowed */
size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
queue = iwl_trans_txq_alloc(mvm->trans, enable,
sta_id, tid, SCD_QUEUE_CFG,
size, timeout);
/* size needs to be a power of 2 for calculating read/write pointers */
size = rounddown_pow_of_two(size);
do {
queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id),
tid, size, timeout);
if (queue < 0)
IWL_DEBUG_TX_QUEUES(mvm,
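Queue sizing above now depends on the station: HE stations get the larger 256-BA queue, others the default; the result is clamped to what the byte-count table can address and rounded down to a power of two so the read/write pointers wrap cleanly. A sketch of the computation (the two sizes mirror IWL_DEFAULT_QUEUE_SIZE and IWL_DEFAULT_QUEUE_SIZE_HE but are hard-coded here):

#include <stdbool.h>
#include <stdint.h>

#define QUEUE_SIZE_DEFAULT	256
#define QUEUE_SIZE_HE		1024

/* round down to a power of two, like the kernel helper */
static uint32_t rounddown_pow2(uint32_t v)
{
	while (v & (v - 1))
		v &= v - 1;	/* clear the lowest set bit */
	return v;
}

static uint32_t pick_txq_size(bool has_he, uint32_t bc_tbl_entries)
{
	uint32_t size = has_he ? QUEUE_SIZE_HE : QUEUE_SIZE_DEFAULT;

	if (size > bc_tbl_entries)	/* min with bc table capacity */
		size = bc_tbl_entries;
	return rounddown_pow2(size);
}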
@ -1023,12 +1092,12 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* Remove the ones that did.
*/
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
u16 tid_bitmap;
u16 q_tid_bitmap;
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;
/*
* We need to take into account a situation in which a TXQ was
@ -1041,7 +1110,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* Mark this queue in the right bitmap, we'll send the command
* to the firmware later.
*/
if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
set_bit(queue, changetid_queues);
IWL_DEBUG_TX_QUEUES(mvm,
@ -1341,7 +1410,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
out_err:
queue_tmp = queue;
iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid);
return ret;
}
@ -1520,8 +1589,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
memset(&cmd, 0, sizeof(cmd));
cmd.sta_id = sta->sta_id;
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA,
0) >= 12 &&
if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 &&
sta->type == IWL_STA_AUX_ACTIVITY)
cmd.mac_id_n_color = cpu_to_le32(mac_id);
else
@ -1735,8 +1803,13 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
break;
default:
ret = -EIO;
#if defined(__linux__)
IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
mvmsta->sta_id);
#elif defined(__FreeBSD__)
IWL_ERR(mvm, "Couldn't drain frames for staid %d, status %#x\n",
mvmsta->sta_id, status);
#endif
break;
}
@ -1788,8 +1861,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
continue;
iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
0);
iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i);
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
}
@ -1836,10 +1908,15 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
#if defined(__linux__)
if (iwl_mvm_has_new_rx_api(mvm))
kfree(mvm_sta->dup_data);
#elif defined(__FreeBSD__)
if (iwl_mvm_has_new_rx_api(mvm)) {
kfree(mvm_sta->dup_data);
mvm_sta->dup_data = NULL;
}
#endif
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
if (ret)
@ -1999,7 +2076,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
if (ret) {
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_disable_txq(mvm, NULL, queue,
IWL_MAX_TID_COUNT, 0);
IWL_MAX_TID_COUNT);
return ret;
}
@ -2071,7 +2148,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;
iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@ -2088,7 +2165,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;
iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT);
ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@ -2205,7 +2282,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
}
queue = *queueptr;
iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT);
if (iwl_mvm_has_new_tx_api(mvm))
return;
@ -2440,7 +2517,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);
iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
@ -2449,8 +2526,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}
#define IWL_MAX_RX_BA_SESSIONS 16
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
struct iwl_mvm_delba_data notif = {
@ -2532,18 +2607,126 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
}
}
static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
bool start, int tid, u16 ssn,
u16 buf_size)
{
struct iwl_mvm_add_sta_cmd cmd = {
.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
.sta_id = mvm_sta->sta_id,
.add_modify = STA_MODE_MODIFY,
};
u32 status;
int ret;
if (start) {
cmd.add_immediate_ba_tid = tid;
cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
cmd.rx_ba_window = cpu_to_le16(buf_size);
cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
} else {
cmd.remove_immediate_ba_tid = tid;
cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
}
status = ADD_STA_SUCCESS;
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
iwl_mvm_add_sta_cmd_size(mvm),
&cmd, &status);
if (ret)
return ret;
switch (status & IWL_ADD_STA_STATUS_MASK) {
case ADD_STA_SUCCESS:
IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
start ? "start" : "stopp");
if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
!(status & IWL_ADD_STA_BAID_VALID_MASK)))
return -EINVAL;
return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
case ADD_STA_IMMEDIATE_BA_FAILURE:
IWL_WARN(mvm, "RX BA Session refused by fw\n");
return -ENOSPC;
default:
IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
start ? "start" : "stopp", status);
return -EIO;
}
}
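For reference, a minimal standalone sketch of the status-word parsing done in iwl_mvm_fw_baid_op_sta() above, assuming the mask layout from fw/api/sta.h at this snapshot (low byte carries the status code, bit 15 flags a valid BAID, bits 8-14 hold the BAID itself); the demo_ names are hypothetical:
#include <stdint.h>
#define DEMO_ADD_STA_STATUS_MASK     0x00ffu  /* low byte: status code */
#define DEMO_ADD_STA_BAID_VALID_MASK 0x8000u  /* bit 15: BAID field is valid */
#define DEMO_ADD_STA_BAID_MASK       0x7f00u  /* bits 8..14: the BAID */
/* Returns the BAID on success, -1 if the firmware did not report one. */
static int demo_extract_baid(uint32_t status)
{
	if (!(status & DEMO_ADD_STA_BAID_VALID_MASK))
		return -1;
	return (int)((status & DEMO_ADD_STA_BAID_MASK) >> 8);
}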
static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
bool start, int tid, u16 ssn,
u16 buf_size, int baid)
{
struct iwl_rx_baid_cfg_cmd cmd = {
.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
};
u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
int ret;
BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
if (start) {
cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
cmd.alloc.tid = tid;
cmd.alloc.ssn = cpu_to_le16(ssn);
cmd.alloc.win_size = cpu_to_le16(buf_size);
baid = -EIO;
} else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
cmd.remove_v1.baid = cpu_to_le32(baid);
BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
} else {
cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
cmd.remove.tid = cpu_to_le32(tid);
}
ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
&cmd, &baid);
if (ret)
return ret;
if (!start) {
/* ignore firmware baid on remove */
baid = 0;
}
IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
start ? "start" : "stopp");
if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
return -EINVAL;
return baid;
}
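A note on the BUILD_BUG_ON above: iwl_mvm_send_cmd_pdu_status() writes the firmware reply through the status pointer, so passing &baid only works if struct iwl_rx_baid_cfg_resp is exactly int-sized; the guard turns a layout mismatch into a compile-time failure instead of a runtime surprise. A standalone sketch of the same idiom (demo_ names hypothetical):
#include <stdint.h>
/* Fails to compile if cond is true, like the kernel's BUILD_BUG_ON. */
#define DEMO_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
struct demo_baid_resp {
	uint32_t baid;	/* firmware reply payload */
};
static void demo_size_guard(void)
{
	int baid = 0;
	DEMO_BUILD_BUG_ON(sizeof(struct demo_baid_resp) != sizeof(baid));
	(void)baid;
}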
static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta,
bool start, int tid, u16 ssn, u16 buf_size,
int baid)
{
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start,
tid, ssn, buf_size, baid);
return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start,
tid, ssn, buf_size);
}
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
struct iwl_mvm_baid_data *baid_data = NULL;
int ret;
u32 status;
int ret, baid;
u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
IWL_MAX_BAID_OLD;
lockdep_assert_held(&mvm->mutex);
if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
return -ENOSPC;
}
@ -2589,59 +2772,29 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
reorder_buf_size / sizeof(baid_data->entries[0]);
}
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
cmd.sta_id = mvm_sta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
if (start) {
cmd.add_immediate_ba_tid = (u8) tid;
cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
cmd.rx_ba_window = cpu_to_le16(buf_size);
if (iwl_mvm_has_new_rx_api(mvm) && !start) {
baid = mvm_sta->tid_to_baid[tid];
} else {
cmd.remove_immediate_ba_tid = (u8) tid;
}
cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
STA_MODIFY_REMOVE_BA_TID;
status = ADD_STA_SUCCESS;
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
iwl_mvm_add_sta_cmd_size(mvm),
&cmd, &status);
if (ret)
goto out_free;
switch (status & IWL_ADD_STA_STATUS_MASK) {
case ADD_STA_SUCCESS:
IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
start ? "start" : "stopp");
break;
case ADD_STA_IMMEDIATE_BA_FAILURE:
IWL_WARN(mvm, "RX BA Session refused by fw\n");
ret = -ENOSPC;
break;
default:
ret = -EIO;
IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
start ? "start" : "stopp", status);
break;
/* we don't really need it in this case */
baid = -1;
}
if (ret)
/* Don't send command to remove (start=0) BAID during restart */
if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size,
baid);
if (baid < 0) {
ret = baid;
goto out_free;
}
if (start) {
u8 baid;
mvm->rx_ba_sessions++;
if (!iwl_mvm_has_new_rx_api(mvm))
return 0;
if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
ret = -EINVAL;
goto out_free;
}
baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
IWL_ADD_STA_BAID_SHIFT);
baid_data->baid = baid;
baid_data->timeout = timeout;
baid_data->last_rx = jiffies;
@ -2669,7 +2822,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
rcu_assign_pointer(mvm->baid_map[baid], baid_data);
} else {
u8 baid = mvm_sta->tid_to_baid[tid];
baid = mvm_sta->tid_to_baid[tid];
if (mvm->rx_ba_sessions > 0)
/* check that restart flow didn't zero the counter */
@ -2690,6 +2843,16 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
kfree_rcu(baid_data, rcu_head);
IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
/*
* After we've deleted it, do another queue sync
* so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
* running it won't find a new session in the old
* BAID. It can find the NULL pointer for the BAID,
* but we must not have it find a different session.
*/
iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
true, NULL, 0);
}
return 0;
@ -3234,8 +3397,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
int i, size;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA_KEY,
int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
new_api ? 2 : 1);
if (sta_id == IWL_MVM_INVALID_STA)
@ -3935,7 +4097,7 @@ void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
if (!WARN_ON(!mvmsta))
if (mvmsta)
iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
rcu_read_unlock();
@ -3994,3 +4156,21 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_dealloc_int_sta(mvm, sta);
return ret;
}
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 mac_id)
{
struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
.mac_id = cpu_to_le32(mac_id),
};
int ret;
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
CMD_ASYNC,
sizeof(cancel_channel_switch_cmd),
&cancel_channel_switch_cmd);
if (ret)
IWL_ERR(mvm, "Failed to cancel the channel switch\n");
}
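Most of the command-ID churn in this import follows one pattern: the old iwl_cmd_id(cmd, group, version) helper is gone and callers build the ID with WIDE_ID(group, cmd), as in the hunk above. A minimal sketch of the packing, matching the iwl-trans.h definition at this snapshot (the demo_ wrapper is hypothetical):
#include <stdint.h>
/* A wide command ID carries the command group in the upper byte and
 * the opcode in the lower byte. */
#define DEMO_WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
/* e.g. DEMO_WIDE_ID(0x03, 0x04) -> 0x0304, selecting opcode 0x04
 * inside group 0x03; legacy commands keep a bare one-byte opcode. */
static uint16_t demo_wide_id(uint8_t grp, uint8_t opcode)
{
	return (uint16_t)DEMO_WIDE_ID(grp, opcode);
}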

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@ -373,6 +373,7 @@ struct iwl_mvm_rxq_dup_data {
* @tx_ant: the index of the antenna to use for data tx to this station. Only
* used during connection establishment (e.g. for the 4 way handshake
* exchange).
* @pairwise_cipher: used to feed iwlmei upon authorization
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@ -415,6 +416,7 @@ struct iwl_mvm_sta {
u8 sleep_tx_count;
u8 avg_energy;
u8 tx_ant;
u32 pairwise_cipher;
};
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
@ -546,4 +548,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
u8 *key, u32 key_len);
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 mac_id);
#endif /* __sta_h__ */

View File

@ -608,11 +608,10 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
/* make sure the phy is on the base channel */
if (wait_for_phy)
#if defined(__linux__)
msleep(
msleep(TU_TO_MS(vif->bss_conf.dtim_period *
#elif defined(__FreeBSD__)
linux_msleep(
linux_msleep(TU_TO_MS(vif->bss_conf.dtim_period *
#endif
TU_TO_MS(vif->bss_conf.dtim_period *
vif->bss_conf.beacon_int));
/* flush the channel switch state */

View File

@ -49,14 +49,13 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
/*
* Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
* Clear the ROC_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
* in the case that the time event actually completed in the firmware
* (which is handled in iwl_mvm_te_handle_notif).
*/
clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
synchronize_net();
@ -82,14 +81,23 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
}
} else {
}
/*
* Clear the ROC_AUX_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
* in the case that the time event actually completed in the firmware
* (which is handled in iwl_mvm_te_handle_notif).
*/
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
/* do the same in case of hot spot 2.0 */
iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
/* In newer versions of this command an aux station is added only
 * in case of a dedicated tx queue and needs to be removed at the
 * end of its use */
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA, 0) >= 12)
if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12)
iwl_mvm_rm_aux_sta(mvm);
}
@ -649,8 +657,8 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
};
int ret;
ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
MAC_CONF_GROUP, 0),
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm,
@ -687,11 +695,14 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
iwl_mvm_te_clear_data(mvm, te_data);
spin_unlock_bh(&mvm->time_event_lock);
/* When session protection is supported, the te_data->id field
/* When session protection is used, the te_data->id field
* is reused to save session protection's configuration.
* For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set
* to HOT_SPOT_CMD.
*/
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
id != HOT_SPOT_CMD) {
if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
/* Session protection is still ongoing. Cancel it */
iwl_mvm_cancel_session_protection(mvm, mvmvif, id);
@ -911,8 +922,8 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
}
cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
MAC_CONF_GROUP, 0),
return iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd);
}
@ -1027,7 +1038,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_p2p_roc_finished(mvm);
} else {
iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
&mvmvif->time_event_data);
&mvmvif->hs_time_event_data);
iwl_mvm_roc_finished(mvm);
}
@ -1150,8 +1161,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
MAC_CONF_GROUP, 0) };
const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color =
@ -1189,8 +1199,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
if (!wait_for_notif) {
if (iwl_mvm_send_cmd_pdu(mvm,
iwl_cmd_id(SESSION_PROTECTION_CMD,
MAC_CONF_GROUP, 0),
WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd)) {
IWL_ERR(mvm,
"Couldn't send the SESSION_PROTECTION_CMD\n");
@ -1207,8 +1216,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
iwl_mvm_session_prot_notif, NULL);
if (iwl_mvm_send_cmd_pdu(mvm,
iwl_cmd_id(SESSION_PROTECTION_CMD,
MAC_CONF_GROUP, 0),
WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd)) {
IWL_ERR(mvm,
"Couldn't send the SESSION_PROTECTION_CMD\n");

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2019-2020 Intel Corporation
* Copyright (C) 2012-2014, 2019-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@ -162,6 +162,11 @@ void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
notif = (struct ct_kill_notif *)pkt->data;
IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
notif->temperature);
if (iwl_fw_lookup_notif_ver(mvm->fw, PHY_OPS_GROUP,
CT_KILL_NOTIFICATION, 0) > 1)
IWL_DEBUG_TEMP(mvm,
"CT kill notification DTS bitmap = 0x%x, Scheme = %d\n",
notif->dts, notif->scheme);
iwl_mvm_enter_ctkill(mvm);
}
@ -242,8 +247,8 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
* a response. For older versions we send the command and wait for a
* notification (no command TLV for previous versions).
*/
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 1)
return iwl_mvm_send_temp_cmd(mvm, true, temp);

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -39,11 +39,11 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
#define OPT_HDR(type, skb, off) \
(type *)(skb_network_header(skb) + (off))
static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info,
u16 offload_assist)
static u16 iwl_mvm_tx_csum_pre_bz(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info, bool amsdu)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
u16 offload_assist = 0;
#if IS_ENABLED(CONFIG_INET)
u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
u8 protocol = 0;
@ -106,8 +106,7 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
if (skb->protocol == htons(ETH_P_IP) &&
(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
if (skb->protocol == htons(ETH_P_IP) && amsdu) {
ip_hdr(skb)->check = 0;
offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
}
@ -132,9 +131,63 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
out:
#endif
if (amsdu)
offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
else if (ieee80211_hdrlen(hdr->frame_control) % 4)
/* padding is inserted later in transport */
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
return offload_assist;
}
u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
u32 offload_assist = IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
unsigned int csum_start = skb_checksum_start_offset(skb);
offload_assist |= u32_encode_bits(hdrlen / 2,
IWL_TX_CMD_OFFLD_BZ_MH_LEN);
if (amsdu)
offload_assist |= IWL_TX_CMD_OFFLD_BZ_AMSDU;
else if (hdrlen % 4)
/* padding is inserted later in transport */
offload_assist |= IWL_TX_CMD_OFFLD_BZ_MH_PAD;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return offload_assist;
offload_assist |= IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM |
IWL_TX_CMD_OFFLD_BZ_ZERO2ONES;
/*
* mac80211 will always calculate checksum in software for
* non-fast-xmit, and so we can only do offloaded checksum
* for fast-xmit frames. In this case, we always have the
* RFC 1042 header present. skb_checksum_start_offset()
* returns the offset from the beginning, but the hardware
* needs it from after the header & SNAP header.
*/
csum_start -= hdrlen + 8;
offload_assist |= u32_encode_bits(csum_start,
IWL_TX_CMD_OFFLD_BZ_START_OFFS);
offload_assist |= u32_encode_bits(csum_start + skb->csum_offset,
IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS);
return offload_assist;
}
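A worked sketch of the checksum-start arithmetic in iwl_mvm_tx_csum_bz() above, under the assumption stated in its comment that fast-xmit frames always carry an 8-byte RFC 1042 (SNAP) header right after the 802.11 header; the numbers are illustrative only:
#include <stdint.h>
/* The Bz hardware wants the checksum-start offset measured from the
 * end of the 802.11 header plus SNAP, not from the start of the skb. */
static uint32_t demo_bz_csum_start(uint32_t skb_csum_start, uint32_t hdrlen)
{
	return skb_csum_start - (hdrlen + 8);
}
/* Example: a 26-byte QoS data header with checksumming starting at
 * byte 54 of the skb gives 54 - (26 + 8) = 20 bytes into the payload. */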
static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
bool amsdu)
{
if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
return iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu);
return iwl_mvm_tx_csum_bz(mvm, skb, amsdu);
}
/*
* Sets most of the Tx cmd's fields
*/
@ -146,7 +199,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
__le16 fc = hdr->frame_control;
u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
u32 len = skb->len + FCS_LEN;
u16 offload_assist = 0;
bool amsdu = false;
u8 ac;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
@ -166,8 +219,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
} else if (ieee80211_is_back_req(fc)) {
struct ieee80211_bar *bar = (void *)skb->data;
u16 control = le16_to_cpu(bar->control);
@ -234,14 +286,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx_cmd->sta_id = sta_id;
/* padding is inserted later in transport */
if (ieee80211_hdrlen(fc) % 4 &&
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
tx_cmd->offload_assist |=
cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info,
offload_assist));
tx_cmd->offload_assist =
cpu_to_le16(iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu));
}
static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
@ -269,7 +315,6 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
u8 rate_plcp;
u32 rate_flags = 0;
bool is_cck;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* info->control is only relevant for non HW rate control */
if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
@ -279,7 +324,8 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
"Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
info->control.rates[0].flags,
info->control.rates[0].idx,
le16_to_cpu(fc), mvmsta->sta_state);
le16_to_cpu(fc),
sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1);
rate_idx = info->control.rates[0].idx;
}
@ -304,7 +350,7 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE);
/* Set CCK or OFDM flag */
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 8) {
if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
if (!is_cck)
rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
else
@ -462,27 +508,18 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
u16 offload_assist = 0;
u32 rate_n_flags = 0;
u16 flags = 0;
struct iwl_mvm_sta *mvmsta = sta ?
iwl_mvm_sta_from_mac80211(sta) : NULL;
bool amsdu = false;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info,
offload_assist);
/* padding is inserted later in transport */
if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
if (!info->control.hw_key)
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
@ -502,8 +539,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
if (mvm->trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210) {
struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
u32 offload_assist = iwl_mvm_tx_csum(mvm, skb,
info, amsdu);
cmd->offload_assist |= cpu_to_le32(offload_assist);
cmd->offload_assist = cpu_to_le32(offload_assist);
/* Total # bytes to be transmitted */
cmd->len = cpu_to_le16((u16)skb->len);
@ -515,8 +554,11 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
} else {
struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
u16 offload_assist = iwl_mvm_tx_csum_pre_bz(mvm, skb,
info,
amsdu);
cmd->offload_assist |= cpu_to_le16(offload_assist);
cmd->offload_assist = cpu_to_le16(offload_assist);
/* Total # bytes to be transmitted */
cmd->len = cpu_to_le16((u16)skb->len);
@ -630,9 +672,9 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
goto out;
ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
mgmt->u.probe_resp.variable,
skb->len - base_len,
match, 4, 2);
mgmt->u.probe_resp.variable,
skb->len - base_len,
match, 4, 2);
if (!ie) {
IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
goto out;
@ -1129,6 +1171,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
/* From now on, we cannot access info->control */
iwl_mvm_skb_prepare_status(skb, dev_cmd);
if (ieee80211_is_data(fc))
iwl_mvm_mei_tx_copy_to_csme(mvm, skb,
info->control.hw_key ?
info->control.hw_key->iv_len : 0);
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
@ -1555,8 +1602,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
if (unlikely(!seq_ctl)) {
struct ieee80211_hdr *hdr = (void *)skb->data;
/*
* If it is an NDP, we can't update next_reclaim since
* its sequence control is 0. Note that for that same

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@ -172,8 +172,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
if (iwl_fw_lookup_cmd_ver(fw, LONG_GROUP,
TX_CMD, 0) > 8)
if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
/* In the new rate format, legacy rates are indexed:
 * 0 - 3 for CCK and 0 - 7 for OFDM.
 */
@ -244,38 +243,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx;
}
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_ENABLE_QUEUE,
.window = frame_limit,
.sta_id = sta_id,
.ssn = cpu_to_le16(ssn),
.tx_fifo = fifo,
.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
.tid = tid,
};
int ret;
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
"Trying to reconfig unallocated queue %d\n", queue))
return -ENXIO;
IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
queue, fifo, ret);
return ret;
}
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @mvm: Driver data.
@ -343,25 +310,64 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_request_smps(vif, smps_mode);
}
static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION);
return true;
}
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
struct iwl_statistics_cmd scmd = {
.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
};
struct iwl_host_cmd cmd = {
.id = STATISTICS_CMD,
.len[0] = sizeof(scmd),
.data[0] = &scmd,
.flags = CMD_WANT_SKB,
};
int ret;
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret)
return ret;
/* From version 15 of STATISTICS_NOTIFICATION, the reply to
 * STATISTICS_CMD is empty, and the data arrives instead in a
 * STATISTICS_NOTIFICATION notification
 */
if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
STATISTICS_NOTIFICATION, 0) < 15) {
cmd.flags = CMD_WANT_SKB;
iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
iwl_free_resp(&cmd);
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret)
return ret;
iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
iwl_free_resp(&cmd);
} else {
struct iwl_notification_wait stats_wait;
static const u16 stats_complete[] = {
STATISTICS_NOTIFICATION,
};
iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
stats_complete, ARRAY_SIZE(stats_complete),
iwl_wait_stats_complete, NULL);
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
iwl_remove_notification(&mvm->notif_wait, &stats_wait);
return ret;
}
/* 200ms should be enough for FW to collect data from all
* LMACs and send STATISTICS_NOTIFICATION to host
*/
ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5);
if (ret)
return ret;
}
if (clear)
iwl_mvm_accu_radio_stats(mvm);
@ -444,8 +450,7 @@ void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
cmd.low_latency_tx = 1;
}
if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
MAC_CONF_GROUP, 0),
if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
0, sizeof(cmd), &cmd))
IWL_ERR(mvm, "Failed to send low latency command\n");
}
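Related cleanup visible in the ADD_STA, TX_CMD and LOW_LATENCY_CMD hunks above: iwl_fw_lookup_cmd_ver() no longer takes a separate group argument; the group now rides inside the (wide) command ID itself. A hedged before/after sketch; the exact signature lives in fw/img.h of this snapshot:
/* Before: group passed separately.
 *	ver = iwl_fw_lookup_cmd_ver(fw, LONG_GROUP, TX_CMD, 0);
 *
 * After: a single id, either a bare LEGACY_GROUP opcode or a
 * WIDE_ID(group, opcode) value.
 *	ver = iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0);
 *	ver = iwl_fw_lookup_cmd_ver(fw,
 *				    WIDE_ID(PHY_OPS_GROUP,
 *					    CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
 *				    IWL_FW_CMD_VER_UNKNOWN);
 */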

View File

@ -495,14 +495,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
{IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
/* Ma devices */
{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
{IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_ma_trans_cfg)},
/* Bz devices */
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@ -510,16 +512,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
_rf_id, _no_160, _cores, _cdb, _cfg, _name) \
_rf_id, _no_160, _cores, _cdb, _jacket, _cfg, _name) \
{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
.name = _name, .mac_type = _mac_type, .rf_type = _rf_type, \
.no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
.mac_step = _mac_step, .cdb = _cdb }
.mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }
#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
_IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
IWL_CFG_ANY, _cfg, _name)
IWL_CFG_ANY, IWL_CFG_ANY, _cfg, _name)
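The _IWL_DEV_INFO() matcher grows a jacket field in this update, so the GL MAC can be split into non-jacket (Bz, IWL_CFG_NO_JACKET) and jacket (BNJ, IWL_CFG_IS_JACKET) entries further down, with IWL_CFG_ANY acting as a wildcard as for every other field. A minimal sketch of that wildcard matching, assuming equality-or-wildcard semantics (demo_ names hypothetical):
#include <stdbool.h>
#include <stdint.h>
#define DEMO_CFG_ANY 0xffffu	/* wildcard: matches any detected value */
static bool demo_field_matches(uint16_t entry, uint16_t detected)
{
	return entry == DEMO_CFG_ANY || entry == detected;
}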
static const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
@ -562,6 +564,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name),
IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name),
IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
@ -665,97 +668,113 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0, NULL),
IWL_DEV_INFO(0x2726, 0x1651, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name),
IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
IWL_DEV_INFO(0x2726, 0x1671, iwlax211_cfg_snj_gf_a0, iwl_ax211_killer_1675s_name),
IWL_DEV_INFO(0x2726, 0x1672, iwlax211_cfg_snj_gf_a0, iwl_ax211_killer_1675i_name),
IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
/* SO with GF2 */
IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
/* MA with GF2 */
IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name),
IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675i_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_2ac_cfg_soc, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_2ac_cfg_soc, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_2ac_cfg_soc, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_2ac_cfg_soc, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_2ac_cfg_soc, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_2ac_cfg_soc, iwl9560_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9461_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9461_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9462_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9462_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9270_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9270_name),
_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9162_160_name),
_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9162_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9260_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9260_2ac_cfg, iwl9260_name),
/* Qu with Jf */
@ -763,176 +782,176 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* QnJ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu with Hr */
@ -940,304 +959,376 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_qu_b0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_qu_b0_hr_b0, iwl_ax203_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_qu_c0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_qu_c0_hr_b0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_qu_c0_hr_b0, iwl_ax201_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_quz_a0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_quz_a0_hr_b0, iwl_ax203_name),
/* QnJ with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name),
/* SnJ with Jf */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_jf_b0, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_jf_b0, iwl9560_name),
/* SnJ with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_hr_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_hr_b0, iwl_ax201_name),
/* Ma */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_ma_a0_hr_b0, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_ma_a0_gf_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY,
iwl_cfg_ma_a0_gf4_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_ma_a0_mr_a0, iwl_ax221_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_ma_a0_fm_a0, iwl_ax231_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_mr_a0, iwl_ax221_name),
/* So with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
/* So-F with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
/* So-F with Gf */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY,
iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
/* Bz */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_hr_b0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_gf_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_gf4_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_mr_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_fm_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET,
iwl_cfg_gl_a0_fm_a0, iwl_bz_name),
/* BZ Z step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, SILICON_Z_STEP,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_bz_z0_gf_a0, iwl_bz_name),
/* BNJ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET,
iwl_cfg_bnj_a0_fm_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET,
iwl_cfg_bnj_a0_fm4_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET,
iwl_cfg_bnj_a0_gf_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET,
iwl_cfg_bnj_a0_gf4_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET,
iwl_cfg_bnj_a0_hr_b0, iwl_bz_name),
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* SoF with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* SoF with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* So with GF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY,
iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
/* So with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* So with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* MsP */
/* For now we use the same FW as MR, but this will change in the future. */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_ma_a0_ms_a0, iwl_ax204_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_snj_a0_ms_a0, iwl_ax204_name)
#endif /* CONFIG_IWLMVM */
};
@ -1249,22 +1340,14 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
static int get_crf_id(struct iwl_trans *iwl_trans)
{
int ret = 0;
u32 wfpm_ctrl_addr;
u32 wfpm_otp_cfg_addr;
u32 sd_reg_ver_addr;
u32 cdb = 0;
u32 val;
if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
wfpm_ctrl_addr = WFPM_CTRL_REG_GEN2;
wfpm_otp_cfg_addr = WFPM_OTP_CFG1_ADDR_GEN2;
if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
sd_reg_ver_addr = SD_REG_VER_GEN2;
/* Qu/Pu families have other addresses */
} else {
wfpm_ctrl_addr = WFPM_CTRL_REG;
wfpm_otp_cfg_addr = WFPM_OTP_CFG1_ADDR;
else
sd_reg_ver_addr = SD_REG_VER;
}
if (!iwl_trans_grab_nic_access(iwl_trans)) {
IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n");
@ -1273,15 +1356,15 @@ static int get_crf_id(struct iwl_trans *iwl_trans)
}
/* Enable access to peripheral registers */
val = iwl_read_umac_prph_no_grab(iwl_trans, wfpm_ctrl_addr);
val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG);
val |= ENABLE_WFPM;
iwl_write_umac_prph_no_grab(iwl_trans, wfpm_ctrl_addr, val);
iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val);
/* Read crf info */
val = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr);
/* Read cdb info (also contains the jacket info if needed in the future) */
cdb = iwl_read_umac_prph_no_grab(iwl_trans, wfpm_otp_cfg_addr);
cdb = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
/* Map between crf id to rf id */
switch (REG_CRF_ID_TYPE(val)) {
@ -1337,11 +1420,15 @@ static int get_crf_id(struct iwl_trans *iwl_trans)
static const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step,
u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores)
u16 rf_type, u8 cdb, u8 jacket, u8 rf_id, u8 no_160, u8 cores)
{
int num_devices = ARRAY_SIZE(iwl_dev_info_table);
int i;
for (i = ARRAY_SIZE(iwl_dev_info_table) - 1; i >= 0; i--) {
if (!num_devices)
return NULL;
for (i = num_devices - 1; i >= 0; i--) {
const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
if (dev_info->device != (u16)IWL_CFG_ANY &&
@ -1368,6 +1455,10 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
dev_info->cdb != cdb)
continue;
if (dev_info->jacket != (u8)IWL_CFG_ANY &&
dev_info->jacket != jacket)
continue;
if (dev_info->rf_id != (u8)IWL_CFG_ANY &&
dev_info->rf_id != rf_id)
continue;
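The matcher above treats every field as either an exact requirement or a wildcard: a table value of IWL_CFG_ANY matches anything, and the loop walks the table from the last entry to the first so that entries appended later take precedence. A minimal standalone sketch of the same pattern (hypothetical names, not driver code):

#include <stddef.h>
#include <stdint.h>

#define CFG_ANY 0xffffu				/* stands in for IWL_CFG_ANY */

struct dev_match {
	uint16_t device, subdev, rf_type;
	uint8_t cdb, jacket;
};

/* Scan backwards so the most recently added entry wins over wildcards. */
static const struct dev_match *
find_match(const struct dev_match *tbl, size_t n, uint16_t device,
	   uint16_t subdev, uint16_t rf_type, uint8_t cdb, uint8_t jacket)
{
	for (size_t i = n; i-- > 0;) {
		const struct dev_match *m = &tbl[i];

		if (m->device != (uint16_t)CFG_ANY && m->device != device)
			continue;
		if (m->subdev != (uint16_t)CFG_ANY && m->subdev != subdev)
			continue;
		if (m->rf_type != (uint16_t)CFG_ANY && m->rf_type != rf_type)
			continue;
		if (m->cdb != (uint8_t)CFG_ANY && m->cdb != cdb)
			continue;
		if (m->jacket != (uint8_t)CFG_ANY && m->jacket != jacket)
			continue;
		return m;
	}
	return NULL;
}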
@ -1422,15 +1513,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* first trying to load the firmware etc. and potentially only
* detecting any problems when the first interface is brought up.
*/
ret = iwl_finish_nic_init(iwl_trans);
if (ret)
goto out_free_trans;
if (iwl_trans_grab_nic_access(iwl_trans)) {
/* all good */
iwl_trans_release_nic_access(iwl_trans);
} else {
ret = -EIO;
goto out_free_trans;
ret = iwl_pcie_prepare_card_hw(iwl_trans);
if (!ret) {
ret = iwl_finish_nic_init(iwl_trans);
if (ret)
goto out_free_trans;
if (iwl_trans_grab_nic_access(iwl_trans)) {
/* all good */
iwl_trans_release_nic_access(iwl_trans);
} else {
ret = -EIO;
goto out_free_trans;
}
}
iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
@ -1442,14 +1536,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (iwl_trans->trans_cfg->rf_id &&
iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
!CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans))
!CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) {
ret = -EINVAL;
goto out_free_trans;
}
dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device,
CSR_HW_REV_TYPE(iwl_trans->hw_rev),
CSR_HW_REV_STEP(iwl_trans->hw_rev),
iwl_trans->hw_rev_step,
CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id),
CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id),
CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id),
IWL_SUBDEVICE_RF_ID(pdev->subsystem_device),
IWL_SUBDEVICE_NO_160(pdev->subsystem_device),
IWL_SUBDEVICE_CORES(pdev->subsystem_device));
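Three of the matcher inputs come straight out of the PCI subsystem-device ID via the IWL_SUBDEVICE_* accessors. A sketch of that style of decode; the mask values below are an assumption for illustration, the authoritative layout is in the IWL_SUBDEVICE_* macros in drv.c:

#include <stdint.h>

/* Assumed bit layout -- verify against IWL_SUBDEVICE_* before relying on it. */
#define SUBDEV_RF_ID(sd)	((uint16_t)((sd) & 0x00F0) >> 4)
#define SUBDEV_NO_160(sd)	((uint16_t)((sd) & 0x0200) >> 9)
#define SUBDEV_CORES(sd)	((uint16_t)((sd) & 0x1C00) >> 10)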
@ -1488,21 +1585,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
iwl_trans->cfg = cfg_7265d;
if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
} else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) {
iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_b0;
} else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF)) {
iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
} else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4)) {
iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
}
/*
* This is a hack to switch from Qu B0 to Qu C0. We need to
* do this for all cfgs that use Qu B0, except for those using
@ -1563,6 +1645,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_trans;
pci_set_drvdata(pdev, iwl_trans);
/* try to get ownership so that we'll know if we don't own it */
iwl_pcie_prepare_card_hw(iwl_trans);
iwl_trans->drv = iwl_drv_start(iwl_trans);
if (IS_ERR(iwl_trans->drv)) {

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2003-2015, 2018-2021 Intel Corporation
* Copyright (C) 2003-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -103,6 +103,18 @@ struct iwl_rx_completion_desc {
u8 reserved2[25];
} __packed;
/**
* struct iwl_rx_completion_desc_bz - Bz completion descriptor
* @rbid: unique tag of the received buffer
* @flags: flags (0: fragmented, all others: reserved)
* @reserved: reserved
*/
struct iwl_rx_completion_desc_bz {
__le16 rbid;
u8 flags;
u8 reserved[1];
} __packed;
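Because the used-BD ring stride is taken from sizeof() on these descriptors, the layout must stay exact. A self-contained sketch of the kind of compile-time check the Rx path relies on (a mirrored struct, not the driver's own):

#include <stdint.h>

struct completion_desc_bz {		/* mirrors iwl_rx_completion_desc_bz */
	uint16_t rbid;
	uint8_t flags;
	uint8_t reserved[1];
} __attribute__((__packed__));

_Static_assert(sizeof(struct completion_desc_bz) == 4,
	       "Bz completion descriptors are 4 bytes on the used-BD ring");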
/**
* struct iwl_rxq - Rx queue
* @id: queue index
@ -133,11 +145,7 @@ struct iwl_rxq {
int id;
void *bd;
dma_addr_t bd_dma;
union {
void *used_bd;
__le32 *bd_32;
struct iwl_rx_completion_desc *cd;
};
void *used_bd;
dma_addr_t used_bd_dma;
u32 read;
u32 write;
@ -261,6 +269,20 @@ enum iwl_pcie_fw_reset_state {
FW_RESET_ERROR,
};
/**
* enum iwl_pcie_imr_status - imr dma transfer state
* @IMR_D2S_IDLE: default value of the dma transfer
* @IMR_D2S_REQUESTED: dma transfer requested
* @IMR_D2S_COMPLETED: dma transfer completed
* @IMR_D2S_ERROR: dma transfer error
*/
enum iwl_pcie_imr_status {
IMR_D2S_IDLE,
IMR_D2S_REQUESTED,
IMR_D2S_COMPLETED,
IMR_D2S_ERROR,
};
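Read together with the interrupt handlers and iwl_trans_pcie_copy_imr() later in this commit, the enum encodes a small request/response state machine; summarizing the transitions as they appear in this diff:

/*
 *   IMR_D2S_IDLE      -> IMR_D2S_REQUESTED   iwl_trans_pcie_copy_imr()
 *                                            before kicking the DMA
 *   IMR_D2S_REQUESTED -> IMR_D2S_COMPLETED   D2S-CH0 ("uCode load")
 *                                            interrupt
 *   IMR_D2S_REQUESTED -> IMR_D2S_ERROR       SW-error interrupt during the
 *                                            transfer, wakes imr_waitq
 *   IMR_D2S_COMPLETED -> IMR_D2S_IDLE        iwl_trans_pcie_copy_imr() on
 *                                            successful completion
 */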
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
@ -319,6 +341,8 @@ enum iwl_pcie_fw_reset_state {
* @alloc_page_lock: spinlock for the page allocator
* @alloc_page: allocated page to still use parts of
* @alloc_page_used: how much of the allocated page was already used (bytes)
* @imr_status: imr dma state machine
* @imr_waitq: imr wait queue for dma completion
* @rf_name: name/version of the CRF, if any
*/
struct iwl_trans_pcie {
@ -363,7 +387,7 @@ struct iwl_trans_pcie {
/* PCI bus related data */
struct pci_dev *pci_dev;
void __iomem *hw_base;
u8 __iomem *hw_base;
bool ucode_write_complete;
bool sx_complete;
@ -414,7 +438,8 @@ struct iwl_trans_pcie {
bool fw_reset_handshake;
enum iwl_pcie_fw_reset_state fw_reset_state;
wait_queue_head_t fw_reset_waitq;
enum iwl_pcie_imr_status imr_status;
wait_queue_head_t imr_waitq;
char rf_name[32];
};
@ -809,4 +834,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr, u32 byte_cnt);
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr, u32 byte_cnt);
#endif /* __iwl_trans_int_pcie_h__ */

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2003-2014, 2018-2021 Intel Corporation
* Copyright (C) 2003-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -190,11 +190,14 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
if (trans->trans_cfg->mq_rx_supported)
if (!trans->trans_cfg->mq_rx_supported)
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
HBUS_TARG_WRPTR_RX_Q(rxq->id));
else
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
else
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@ -231,8 +234,12 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
}
#if defined(__linux__)
IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
#elif defined(__FreeBSD__)
IWL_DEBUG_PCI_RW(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
#endif
(u32)rxb->vid, rxq->id, rxq->write);
}
/*
@ -652,23 +659,30 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
iwl_pcie_rx_allocator(trans_pcie->trans);
}
static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
{
struct iwl_rx_transfer_desc *rx_td;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return sizeof(struct iwl_rx_transfer_desc);
if (use_rx_td)
return sizeof(*rx_td);
else
return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
sizeof(__le32);
return trans->trans_cfg->mq_rx_supported ?
sizeof(__le64) : sizeof(__le32);
}
static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
{
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
return sizeof(struct iwl_rx_completion_desc_bz);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return sizeof(struct iwl_rx_completion_desc);
return sizeof(__le32);
}
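A worked example of what the two helpers imply for ring allocations (illustrative queue depth; the 32- and 4-byte figures match the BUILD_BUG_ONs further down):

/*
 * For a 4096-entry Rx queue, queue_size * iwl_pcie_used_bd_size() gives:
 *   Bz and later:  4096 * sizeof(struct iwl_rx_completion_desc_bz)
 *                = 4096 * 4  =  16 KiB
 *   AX210 family:  4096 * sizeof(struct iwl_rx_completion_desc)
 *                = 4096 * 32 = 128 KiB
 *   older MQ HW:   4096 * sizeof(__le32)
 *                = 4096 * 4  =  16 KiB
 */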
static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
bool use_rx_td = (trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210);
int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
int free_size = iwl_pcie_free_bd_size(trans);
if (rxq->bd)
dma_free_coherent(trans->dev,
@ -682,8 +696,8 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
if (rxq->used_bd)
dma_free_coherent(trans->dev,
(use_rx_td ? sizeof(*rxq->cd) :
sizeof(__le32)) * rxq->queue_size,
iwl_pcie_used_bd_size(trans) *
rxq->queue_size,
rxq->used_bd, rxq->used_bd_dma);
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
@ -707,7 +721,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
else
rxq->queue_size = RX_QUEUE_SIZE;
free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
free_size = iwl_pcie_free_bd_size(trans);
/*
* Allocate the circular buffer of Read Buffer Descriptors
@ -720,14 +734,15 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
if (trans->trans_cfg->mq_rx_supported) {
rxq->used_bd = dma_alloc_coherent(dev,
(use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
iwl_pcie_used_bd_size(trans) *
rxq->queue_size,
&rxq->used_bd_dma,
GFP_KERNEL);
if (!rxq->used_bd)
goto err;
}
rxq->rb_stts = (void *)((u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size);
rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
rxq->rb_stts_dma =
trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
@ -1307,9 +1322,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
"Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
rxq->id, offset,
iwl_get_cmd_string(trans,
iwl_cmd_id(pkt->hdr.cmd,
pkt->hdr.group_id,
0)),
WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
pkt->hdr.group_id, pkt->hdr.cmd,
le16_to_cpu(pkt->hdr.sequence));
@ -1319,7 +1332,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
/* check that what the device tells us made sense */
if (offset > max_len)
if (len < sizeof(*pkt) || offset > max_len)
break;
trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
@ -1419,6 +1432,7 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
u16 vid;
BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
if (!trans->trans_cfg->mq_rx_supported) {
rxb = rxq->queue[i];
@ -1426,11 +1440,20 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb;
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
vid = le16_to_cpu(rxq->cd[i].rbid);
*join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
vid = le16_to_cpu(cd[i].rbid);
*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
} else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_rx_completion_desc *cd = rxq->used_bd;
vid = le16_to_cpu(cd[i].rbid);
*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
} else {
vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
__le32 *cd = rxq->used_bd;
vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
}
if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
@ -1608,10 +1631,13 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
if (WARN_ON(entry->entry >= trans->num_rx_queues))
return IRQ_NONE;
if (WARN_ONCE(!rxq,
"[%d] Got MSI-X interrupt before we have Rx queues",
entry->entry))
if (!rxq) {
if (net_ratelimit())
IWL_ERR(trans,
"[%d] Got MSI-X interrupt before we have Rx queues\n",
entry->entry);
return IRQ_NONE;
}
lock_map_acquire(&trans->sync_cmd_lockdep_map);
IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
@ -1958,7 +1984,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
CSR_INT, CSR_INT_BIT_RX_PERIODIC);
}
/* Sending RX interrupt require many steps to be done in the
* the device:
* device:
* 1- write interrupt to current index in ICT table.
* 2- dma RX frame.
* 3- update RX shared data to indicate last write index.
@ -2002,6 +2028,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* Wake up uCode load routine, now that load is complete */
trans_pcie->ucode_write_complete = true;
wake_up(&trans_pcie->ucode_write_waitq);
/* Wake up IMR write routine, now that write to SRAM is complete */
if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
trans_pcie->imr_status = IMR_D2S_COMPLETED;
wake_up(&trans_pcie->ucode_write_waitq);
}
}
if (inta & ~handled) {
@ -2217,7 +2248,17 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
/* This "Tx" DMA channel is used only for loading uCode */
if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
trans_pcie->imr_status == IMR_D2S_REQUESTED) {
IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
isr_stats->tx++;
/* Wake up IMR routine once write to SRAM is complete */
if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
trans_pcie->imr_status = IMR_D2S_COMPLETED;
wake_up(&trans_pcie->ucode_write_waitq);
}
} else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
isr_stats->tx++;
/*
@ -2226,6 +2267,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
*/
trans_pcie->ucode_write_complete = true;
wake_up(&trans_pcie->ucode_write_waitq);
/* Wake up IMR routine once write to SRAM is complete */
if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
trans_pcie->imr_status = IMR_D2S_COMPLETED;
wake_up(&trans_pcie->ucode_write_waitq);
}
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
@ -2240,7 +2287,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
inta_fh);
isr_stats->sw++;
/* during FW reset flow report errors from there */
if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
trans_pcie->imr_status = IMR_D2S_ERROR;
wake_up(&trans_pcie->imr_waitq);
} else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
trans_pcie->fw_reset_state = FW_RESET_ERROR;
wake_up(&trans_pcie->fw_reset_waitq);
} else {
@ -2274,7 +2324,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
/*
* In some rare cases when the HW is in a bad state, we may
* get this interrupt too early, when prph_info is still NULL.
* So make sure that it's not NULL to prevent crashing.
*/
if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
u32 sleep_notif =
le32_to_cpu(trans_pcie->prph_info->sleep_notif);
if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||

View File

@ -84,7 +84,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
iwl_trans_sw_reset(trans);
iwl_trans_sw_reset(trans, false);
/*
* Clear "initialization complete" bit to move adapter from
@ -108,9 +108,12 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE);
else
else if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
else
iwl_write32(trans, CSR_DOORBELL_VECTOR,
UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
/* wait 200ms */
ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
@ -169,7 +172,8 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
iwl_trans_sw_reset(trans);
/* re-take ownership to prevent other users from stealing the device */
iwl_trans_sw_reset(trans, true);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
@ -199,9 +203,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
* interrupt
*/
iwl_enable_rfkill_int(trans);
/* re-take ownership to prevent other users from stealing the device */
iwl_pcie_prepare_card_hw(trans);
}
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2007-2015, 2018-2020 Intel Corporation
* Copyright (C) 2007-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -27,6 +27,7 @@
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
@ -73,12 +74,20 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
if (pci_read_config_dword(pdev, i, ptr))
goto err_read;
#if defined(__linux__)
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif
IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
*ptr = iwl_read32(trans, i);
#if defined(__linux__)
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
if (pos) {
@ -86,7 +95,12 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
if (pci_read_config_dword(pdev, pos + i, ptr))
goto err_read;
#if defined(__linux__)
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif
}
/* Print parent device registers next */
@ -101,7 +115,11 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
if (pci_read_config_dword(pdev, i, ptr))
goto err_read;
#if defined(__linux__)
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif
/* Print root port AER registers */
pos = 0;
@ -115,19 +133,29 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
if (pci_read_config_dword(pdev, pos + i, ptr))
goto err_read;
#if defined(__linux__)
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
4, buf, i, 0);
#elif defined(__FreeBSD__)
iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif
}
goto out;
err_read:
#if defined(__linux__)
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif
IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
trans_pcie->pcie_dbg_dumped_once = 1;
kfree(buf);
}
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
bool retake_ownership)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
@ -137,6 +165,11 @@ static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_RESET,
CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(5000, 6000);
if (retake_ownership)
return iwl_pcie_prepare_card_hw(trans);
return 0;
}
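With the retake_ownership flag, the old follow-up call to iwl_pcie_prepare_card_hw() is folded into the reset itself; callers that keep using the device afterwards now follow the pattern the later hunks show:

	ret = iwl_trans_pcie_sw_reset(trans, true);	/* reset + re-grab ownership */
	if (ret)
		return ret;				/* prepare_card_hw() failed */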
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
@ -382,9 +415,11 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
iwl_trans_pcie_sw_reset(trans);
ret = iwl_trans_pcie_sw_reset(trans, true);
if (!ret)
ret = iwl_finish_nic_init(trans);
ret = iwl_finish_nic_init(trans);
if (WARN_ON(ret)) {
/* Release XTAL ON request */
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
@ -409,7 +444,10 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
apmg_xtal_cfg_reg |
SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
iwl_trans_pcie_sw_reset(trans);
ret = iwl_trans_pcie_sw_reset(trans, true);
if (ret)
IWL_ERR(trans,
"iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");
/* Enable LP XTAL by indirect access through CSR */
apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
@ -515,7 +553,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
return;
}
iwl_trans_pcie_sw_reset(trans);
iwl_trans_pcie_sw_reset(trans, false);
/*
* Clear "initialization complete" bit to move adapter from
@ -595,8 +633,10 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
ret = iwl_pcie_set_hw_ready(trans);
/* If the card is ready, exit 0 */
if (ret >= 0)
if (ret >= 0) {
trans->csme_own = false;
return 0;
}
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
@ -609,8 +649,22 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
do {
ret = iwl_pcie_set_hw_ready(trans);
if (ret >= 0)
if (ret >= 0) {
trans->csme_own = false;
return 0;
}
if (iwl_mei_is_connected()) {
IWL_DEBUG_INFO(trans,
"Couldn't prepare the card but SAP is connected\n");
trans->csme_own = true;
if (trans->trans_cfg->device_family !=
IWL_DEVICE_FAMILY_9000)
IWL_ERR(trans,
"SAP not supported for this NIC family\n");
return -EBUSY;
}
usleep_range(200, 1000);
t += 200;
@ -1245,7 +1299,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
iwl_trans_pcie_sw_reset(trans);
/* re-take ownership to prevent other users from stealing the device */
iwl_trans_pcie_sw_reset(trans, true);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
@ -1275,9 +1330,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* interrupt
*/
iwl_enable_rfkill_int(trans);
/* re-take ownership to prevent other users from stealing the device */
iwl_pcie_prepare_card_hw(trans);
}
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
@ -1483,33 +1535,54 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_set_pwr(trans, true);
}
static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
UREG_DOORBELL_TO_ISR6_RESUME);
} else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
CSR_IPC_SLEEP_CONTROL_RESUME);
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
UREG_DOORBELL_TO_ISR6_SLEEP_CTRL);
} else {
return 0;
}
ret = wait_event_timeout(trans_pcie->sx_waitq,
trans_pcie->sx_complete, 2 * HZ);
/* Invalidate it toward next suspend or resume */
trans_pcie->sx_complete = false;
if (!ret) {
IWL_ERR(trans, "Timeout %s D3\n",
suspend ? "entering" : "exiting");
return -ETIMEDOUT;
}
return 0;
}
static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
bool reset)
{
int ret;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!reset)
/* Enable persistence mode to avoid reset */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
UREG_DOORBELL_TO_ISR6_SUSPEND);
ret = iwl_pcie_d3_handshake(trans, true);
if (ret)
return ret;
ret = wait_event_timeout(trans_pcie->sx_waitq,
trans_pcie->sx_complete, 2 * HZ);
/*
* Invalidate it toward resume.
*/
trans_pcie->sx_complete = false;
if (!ret) {
IWL_ERR(trans, "Timeout entering D3\n");
return -ETIMEDOUT;
}
}
iwl_pcie_d3_complete_suspend(trans, test, reset);
return 0;
@ -1526,6 +1599,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
if (test) {
iwl_enable_interrupts(trans);
*status = IWL_D3_STATUS_ALIVE;
ret = 0;
goto out;
}
@ -1574,25 +1648,10 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
*status = IWL_D3_STATUS_ALIVE;
out:
if (*status == IWL_D3_STATUS_ALIVE &&
trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
trans_pcie->sx_complete = false;
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
UREG_DOORBELL_TO_ISR6_RESUME);
if (*status == IWL_D3_STATUS_ALIVE)
ret = iwl_pcie_d3_handshake(trans, false);
ret = wait_event_timeout(trans_pcie->sx_waitq,
trans_pcie->sx_complete, 2 * HZ);
/*
* Invalidate it toward next suspend.
*/
trans_pcie->sx_complete = false;
if (!ret) {
IWL_ERR(trans, "Timeout exiting D3\n");
return -ETIMEDOUT;
}
}
return 0;
return ret;
}
static void
@ -1779,9 +1838,7 @@ static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
iwl_trans_pcie_sw_reset(trans);
return 0;
return iwl_trans_pcie_sw_reset(trans, true);
}
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
@ -1801,7 +1858,9 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
if (err)
return err;
iwl_trans_pcie_sw_reset(trans);
err = iwl_trans_pcie_sw_reset(trans, true);
if (err)
return err;
if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
trans->trans_cfg->integrated) {
@ -1942,6 +2001,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
trans->txqs.page_offs = trans_cfg->cb_data_offs;
trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
@ -2857,7 +2917,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
struct cont_rec *data = &trans_pcie->fw_mon_data;
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
ssize_t size, bytes_copied = 0;
@ -3462,7 +3522,8 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
.d3_suspend = iwl_trans_pcie_d3_suspend, \
.d3_resume = iwl_trans_pcie_d3_resume, \
.interrupts = iwl_trans_pci_interrupts, \
.sync_nmi = iwl_trans_pcie_sync_nmi \
.sync_nmi = iwl_trans_pcie_sync_nmi, \
.imr_dma_data = iwl_trans_pcie_copy_imr \
static const struct iwl_trans_ops trans_ops_pcie = {
IWL_TRANS_COMMON_OPS,
@ -3547,6 +3608,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
init_waitqueue_head(&trans_pcie->fw_reset_waitq);
init_waitqueue_head(&trans_pcie->imr_waitq);
trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
WQ_HIGHPRI | WQ_UNBOUND, 1);
@ -3628,8 +3690,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* in the old format.
*/
if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
trans->hw_rev = (trans->hw_rev & 0xfff0) |
(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
trans->hw_rev_step = trans->hw_rev & 0xF;
else
trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2;
IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
@ -3677,3 +3740,41 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
iwl_trans_free(trans);
return ERR_PTR(ret);
}
void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr, u32 byte_cnt)
{
iwl_write_prph(trans, IMR_UREG_CHICK,
iwl_read_prph(trans, IMR_UREG_CHICK) |
IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
(u32)(src_addr & 0xFFFFFFFF));
iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
iwl_get_dma_hi_addr(src_addr));
iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
}
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr, u32 byte_cnt)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret = -1;
trans_pcie->imr_status = IMR_D2S_REQUESTED;
iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
ret = wait_event_timeout(trans_pcie->imr_waitq,
trans_pcie->imr_status !=
IMR_D2S_REQUESTED, 5 * HZ);
if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
iwl_trans_pcie_dump_regs(trans);
return -ETIMEDOUT;
}
trans_pcie->imr_status = IMR_D2S_IDLE;
return 0;
}
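A hypothetical caller of the new helper; the call blocks on imr_waitq for up to five seconds and dumps the PCI registers on a timeout or D2S error:

	/* Copy one 4 KiB IMR chunk from host DRAM (64-bit address) to SRAM. */
	ret = iwl_trans_pcie_copy_imr(trans, sram_dst, dram_src, 4096);
	if (ret)	/* -ETIMEDOUT covers both timeout and IMR_D2S_ERROR */
		IWL_ERR(trans, "IMR chunk copy failed: %d\n", ret);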

View File

@ -545,7 +545,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
trans->cfg->min_txq_size);
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
trans->cfg->min_ba_txq_size);
trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
cmd_queue);
@ -599,7 +599,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
trans->cfg->min_txq_size);
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
trans->cfg->min_ba_txq_size);
ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
cmd_queue);
if (ret) {
@ -882,7 +882,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id);
iwl_trans_write_mem(trans, stts_addr, zero_val,
iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
ARRAY_SIZE(zero_val));
}
@ -1206,7 +1206,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
group_id = cmd->hdr.group_id;
cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
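WIDE_ID packs the group into the high byte and the opcode into the low byte, replacing the old iwl_cmd_id(opcode, group, 0) helper; note the swapped argument order at the call sites. For example (the 0x5 group value is an assumption from the firmware API headers):

	/* WIDE_ID(grp, opcode) == ((grp) << 8) | (opcode) */
	cmd_id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
	/* with DATA_PATH_GROUP == 0x5 this yields 0x05xx */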

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2020-2021 Intel Corporation
* Copyright (C) 2020-2022 Intel Corporation
*/
#ifdef CONFIG_INET
#include <net/tso.h>
@ -9,7 +9,9 @@
#include "iwl-debug.h"
#include "iwl-io.h"
#include "fw/api/commands.h"
#include "fw/api/tx.h"
#include "fw/api/datapath.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
@ -46,13 +48,13 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
/* Starting from AX210, the HW expects bytes */
WARN_ON(trans->txqs.bc_table_dword);
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
} else {
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
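A worked example for the gen3 byte-count entry above (illustrative numbers): a 1500-byte frame whose filled TFD spans 130 bytes needs DIV_ROUND_UP(130, 64) - 1 = 2 extra fetch chunks, so the entry packs as:

	bc_ent = cpu_to_le16(1500 | (2 << 14));	/* len in bits 0..13, chunks in 14..15 */
	scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;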
@ -194,8 +196,7 @@ static struct page *get_workaround_page(struct iwl_trans *trans,
return NULL;
/* set the chaining pointer to the previous page if there */
*(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) =
*page_ptr;
*(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
*page_ptr = ret;
return ret;
@ -320,8 +321,7 @@ struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
return NULL;
p->pos = page_address(p->page);
/* set the chaining pointer to NULL */
*(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) =
NULL;
*(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
*page_ptr = p->page;
get_page(p->page);
@ -1092,9 +1092,8 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
return -ENOMEM;
}
static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
struct iwl_txq **intxq, int size,
unsigned int timeout)
static struct iwl_txq *
iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
{
size_t bc_tbl_size, bc_tbl_entries;
struct iwl_txq *txq;
@ -1106,18 +1105,18 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
bc_tbl_entries = bc_tbl_size / sizeof(u16);
if (WARN_ON(size > bc_tbl_entries))
return -EINVAL;
return ERR_PTR(-EINVAL);
txq = kzalloc(sizeof(*txq), GFP_KERNEL);
if (!txq)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
&txq->bc_tbl.dma);
if (!txq->bc_tbl.addr) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
kfree(txq);
return -ENOMEM;
return ERR_PTR(-ENOMEM);
}
ret = iwl_txq_alloc(trans, txq, size, false);
@ -1133,12 +1132,11 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
txq->wd_timeout = msecs_to_jiffies(timeout);
*intxq = txq;
return 0;
return txq;
error:
iwl_txq_gen2_free_memory(trans, txq);
return ret;
return ERR_PTR(ret);
}
static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
@ -1195,30 +1193,57 @@ static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
return ret;
}
int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
int cmd_id, int size, unsigned int timeout)
int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
u8 tid, int size, unsigned int timeout)
{
struct iwl_txq *txq = NULL;
struct iwl_tx_queue_cfg_cmd cmd = {
.flags = flags,
.sta_id = sta_id,
.tid = tid,
};
struct iwl_txq *txq;
union {
struct iwl_tx_queue_cfg_cmd old;
struct iwl_scd_queue_cfg_cmd new;
} cmd;
struct iwl_host_cmd hcmd = {
.id = cmd_id,
.len = { sizeof(cmd) },
.data = { &cmd, },
.flags = CMD_WANT_SKB,
};
int ret;
ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
if (ret)
return ret;
txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
if (IS_ERR(txq))
return PTR_ERR(txq);
cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
if (trans->txqs.queue_alloc_cmd_ver == 0) {
memset(&cmd.old, 0, sizeof(cmd.old));
cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
cmd.old.tid = tid;
if (hweight32(sta_mask) != 1) {
ret = -EINVAL;
goto error;
}
cmd.old.sta_id = ffs(sta_mask) - 1;
hcmd.id = SCD_QUEUE_CFG;
hcmd.len[0] = sizeof(cmd.old);
hcmd.data[0] = &cmd.old;
} else if (trans->txqs.queue_alloc_cmd_ver == 3) {
memset(&cmd.new, 0, sizeof(cmd.new));
cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
cmd.new.u.add.flags = cpu_to_le32(flags);
cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
cmd.new.u.add.tid = tid;
hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
hcmd.len[0] = sizeof(cmd.new);
hcmd.data[0] = &cmd.new;
} else {
ret = -EOPNOTSUPP;
goto error;
}
ret = iwl_trans_send_cmd(trans, &hcmd);
if (ret)
@ -1316,10 +1341,10 @@ static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
dma_addr_t hi_len;
if (trans->trans_cfg->use_tfh) {
struct iwl_tfh_tfd *tfd = _tfd;
struct iwl_tfh_tb *tb = &tfd->tbs[idx];
struct iwl_tfh_tfd *tfh_tfd = _tfd;
struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
return (dma_addr_t)(le64_to_cpu(tb->addr));
return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
}
tfd = _tfd;
@ -1762,8 +1787,11 @@ static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
}
if (test_bit(STATUS_FW_ERROR, &trans->status)) {
IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
dump_stack();
if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
&trans->status)) {
IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
dump_stack();
}
ret = -EIO;
goto cancel;
}

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2020-2021 Intel Corporation
* Copyright (C) 2020-2022 Intel Corporation
*/
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
@ -112,10 +112,9 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
struct iwl_cmd_meta *meta,
struct iwl_tfh_tfd *tfd);
int iwl_txq_dyn_alloc(struct iwl_trans *trans,
__le16 flags, u8 sta_id, u8 tid,
int cmd_id, int size,
unsigned int timeout);
int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
u32 sta_mask, u8 tid,
int size, unsigned int timeout);
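Callers now hand in a station bitmask and no longer pick the command ID themselves; a hypothetical call site (sta_id, tid and wdg_timeout assumed to be in scope), where only one bit may be set as long as queue_alloc_cmd_ver is 0:

	queue = iwl_txq_dyn_alloc(trans, flags, BIT(sta_id), tid,
				  IWL_DEFAULT_QUEUE_SIZE, wdg_timeout);
	if (queue < 0)
		return queue;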
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
@ -137,9 +136,9 @@ static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
struct iwl_tfd *tfd;
if (trans->trans_cfg->use_tfh) {
struct iwl_tfh_tfd *tfd = _tfd;
struct iwl_tfh_tfd *tfh_tfd = _tfd;
return le16_to_cpu(tfd->num_tbs) & 0x1f;
return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
}
tfd = (struct iwl_tfd *)_tfd;
@ -153,10 +152,10 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
struct iwl_tfd_tb *tb;
if (trans->trans_cfg->use_tfh) {
struct iwl_tfh_tfd *tfd = _tfd;
struct iwl_tfh_tb *tb = &tfd->tbs[idx];
struct iwl_tfh_tfd *tfh_tfd = _tfd;
struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
return le16_to_cpu(tb->tb_len);
return le16_to_cpu(tfh_tb->tb_len);
}
tfd = (struct iwl_tfd *)_tfd;

View File

@ -46,8 +46,9 @@ CFLAGS+= -DCONFIG_IWLMVM=1
#CFLAGS+= -DCONFIG_IPV6=1
#CFLAGS+= -DCONFIG_IWLWIFI_BCAST_FILTERING=1
CFLAGS+= -DCONFIG_IWLWIFI_DEBUG=1
#CFLAGS+= -DCONFIG_IWLWIFI_DEBUGFS=1
#CFLAGS+= -DCONFIG_IWLWIFI_LEDS=1
CFLAGS+= -DCONFIG_IWLWIFI_OPMODE_MODULAR=1
#CFLAGS+= -DCONFIG_IWLWIFI_OPMODE_MODULAR=1
CFLAGS+= -DCONFIG_IWLWIFI_DEVICE_TRACING=1
#CFLAGS+= -DCONFIG_LOCKDEP=1
#CFLAGS+= -DCONFIG_MAC80211_DEBUGFS=1