qlxgbe: clean up empty lines in .c and .h files

This commit is contained in:
Mateusz Guzik 2020-09-01 21:56:55 +00:00
parent d7c853dee0
commit 2c4a3d0a2c
19 changed files with 36 additions and 233 deletions

View File

@ -10962,4 +10962,3 @@ unsigned char ql83xx_bootloader[] = {
0x00, 0x00, 0x00, 0x00, 0x9b, 0x64, 0x92, 0x0e
};
unsigned int ql83xx_bootloader_len = 131072;

View File

@ -153,7 +153,7 @@ void ql_dump_buf8(qla_host_t *ha, const char *msg, void *dbuf, uint32_t len)
buf = dbuf;
device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);
while (len >= 16) {
device_printf(dev,"0x%08x:"
" %02x %02x %02x %02x %02x %02x %02x %02x"
@ -256,7 +256,6 @@ void ql_dump_buf8(qla_host_t *ha, const char *msg, void *dbuf, uint32_t len)
default:
break;
}
device_printf(dev, "%s: %s dump end\n", __func__, msg);
}

View File

@ -102,5 +102,4 @@ extern void ql_dump_buf32(qla_host_t *ha, const char *str, void *dbuf,
#endif
#endif /* #ifndef _QL_DBG_H_ */

View File

@ -170,7 +170,7 @@ struct qla_host {
int msix_count;
qla_ivec_t irq_vec[MAX_SDS_RINGS];
/* parent dma tag */
bus_dma_tag_t parent_tag;
@ -228,7 +228,7 @@ struct qla_host {
struct task stats_task;
struct taskqueue *stats_tq;
uint32_t fw_ver_major;
uint32_t fw_ver_minor;
uint32_t fw_ver_sub;

View File

@ -149068,4 +149068,3 @@ unsigned char ql83xx_firmware[] = {
0x36, 0x37, 0x20, 0x0a
};
unsigned int ql83xx_firmware_len = 1788328;

View File

@ -124,5 +124,4 @@ extern void ql_sp_log(qla_host_t *ha, uint16_t fmtstr_idx, uint16_t num_params,
extern void ql_alloc_sp_log_buffer(qla_host_t *ha);
extern void ql_free_sp_log_buffer(qla_host_t *ha);
#endif /* #ifndef _QL_GLBL_H_ */

View File

@ -98,9 +98,8 @@ qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
int err, ret = 0;
qla_host_t *ha;
err = sysctl_handle_int(oidp, &ret, 0, req);
err = sysctl_handle_int(oidp, &ret, 0, req);
if (err || !req->newptr)
return (err);
@ -147,7 +146,6 @@ qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
ha = (qla_host_t *)arg1;
if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {
err = qla_get_port_config(ha, &cfg_bits);
if (err)
@ -215,7 +213,6 @@ qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
(ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {
if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
err = qla_set_cam_search_mode(ha, (uint32_t)ret);
QLA_UNLOCK(ha, __func__);
@ -604,7 +601,6 @@ qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
children = SYSCTL_CHILDREN(ctx_oid);
for (i = 0; i < ha->hw.num_tx_rings; i++) {
bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
snprintf(name_str, sizeof(name_str), "%d", i);
@ -783,7 +779,6 @@ qlnx_add_drvr_sds_stats(qla_host_t *ha)
children = SYSCTL_CHILDREN(ctx_oid);
for (i = 0; i < ha->hw.num_sds_rings; i++) {
bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
snprintf(name_str, sizeof(name_str), "%d", i);
@ -822,7 +817,6 @@ qlnx_add_drvr_rds_stats(qla_host_t *ha)
children = SYSCTL_CHILDREN(ctx_oid);
for (i = 0; i < ha->hw.num_rds_rings; i++) {
bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
snprintf(name_str, sizeof(name_str), "%d", i);
@ -867,7 +861,6 @@ qlnx_add_drvr_tx_stats(qla_host_t *ha)
children = SYSCTL_CHILDREN(ctx_oid);
for (i = 0; i < ha->hw.num_tx_rings; i++) {
bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
snprintf(name_str, sizeof(name_str), "%d", i);
@ -1162,7 +1155,6 @@ ql_hw_link_status(qla_host_t *ha)
}
switch (ha->hw.module_type) {
case 0x01:
device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
break;
@ -1282,7 +1274,7 @@ ql_alloc_dma(qla_host_t *ha)
hw->dma_buf.tx_ring.alignment = 8;
hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
device_printf(dev, "%s: tx ring alloc failed\n", __func__);
goto ql_alloc_dma_exit;
@ -1290,7 +1282,7 @@ ql_alloc_dma(qla_host_t *ha)
vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
paddr = hw->dma_buf.tx_ring.dma_addr;
for (i = 0; i < ha->hw.num_tx_rings; i++) {
tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
@ -1321,7 +1313,6 @@ ql_alloc_dma(qla_host_t *ha)
*/
for (i = 0; i < hw->num_rds_rings; i++) {
hw->dma_buf.rds_ring[i].alignment = 8;
hw->dma_buf.rds_ring[i].size =
(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
@ -1415,7 +1406,6 @@ qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
i = Q8_MBX_MSEC_DELAY;
while (i) {
if (ha->qla_initiate_recovery) {
ql_sp_log(ha, 2, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
return (-1);
@ -1449,10 +1439,8 @@ qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
i = Q8_MBX_MSEC_DELAY;
while (i) {
if (ha->qla_initiate_recovery) {
ql_sp_log(ha, 4, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
return (-1);
@ -1483,7 +1471,6 @@ qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
}
for (i = 0; i < n_fwmbox; i++) {
if (ha->qla_initiate_recovery) {
ql_sp_log(ha, 6, 1, ha->qla_initiate_recovery, 0, 0, 0, 0);
return (-1);
@ -1512,7 +1499,6 @@ qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
}
ql_sp_log(ha, 7, 5, fw_mbox[0], fw_mbox[1], fw_mbox[2], fw_mbox[3], fw_mbox[4]);
exit_qla_mbx_cmd:
return (ret);
}
@ -1742,7 +1728,7 @@ qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
q80_config_intr_coalesc_rsp_t *intrc_rsp;
uint32_t err, i;
device_t dev = ha->pci_dev;
intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
@ -1786,11 +1772,10 @@ qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
return (-1);
}
return 0;
}
/*
* Name: qla_config_mac_addr
* Function: binds a MAC address to the context/interface.
@ -1857,11 +1842,10 @@ qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
}
return (-1);
}
return 0;
}
/*
* Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
@ -1899,7 +1883,7 @@ qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
return (-1);
}
return 0;
}
@ -2284,7 +2268,6 @@ qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
dev = ha->pci_dev;
eh = mtod(mp, struct ether_vlan_header *);
if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
@ -2523,7 +2506,6 @@ ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
eh = mtod(mp, struct ether_vlan_header *);
if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
src = frame_hdr;
@ -2586,7 +2568,6 @@ ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
if (nsegs > ha->hw.max_tx_segs)
ha->hw.max_tx_segs = nsegs;
@ -2611,7 +2592,6 @@ ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
} else if (mp->m_flags & M_VLANTAG) {
if (hdr_len) { /* TSO */
tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
Q8_TX_CMD_FLAGS_HW_VLAN_ID);
@ -2628,7 +2608,6 @@ ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
}
}
tx_cmd->n_bufs = (uint8_t)nsegs;
tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
@ -2638,7 +2617,6 @@ ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
while (1) {
for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
switch (i) {
case 0:
tx_cmd->buf1_addr = c_seg->ds_addr;
@ -2678,7 +2656,6 @@ ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
}
if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
/* TSO : Copy the header in the following tx cmd descriptors */
txr_next = hw->tx_cntxt[txr_idx].txr_next;
@ -2709,7 +2686,6 @@ ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
/* bytes left in TxCmd Entry */
bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
bcopy(src, dst, bytes);
src += bytes;
hdr_len -= bytes;
@ -2751,8 +2727,6 @@ ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
return (0);
}
#define Q8_CONFIG_IND_TBL_SIZE 32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
static int
qla_config_rss_ind_table(qla_host_t *ha)
@ -2760,14 +2734,12 @@ qla_config_rss_ind_table(qla_host_t *ha)
uint32_t i, count;
uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
}
for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
i = i + Q8_CONFIG_IND_TBL_SIZE) {
if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
} else {
@ -2858,7 +2830,6 @@ qla_free_soft_lro(qla_host_t *ha)
return;
}
/*
* Name: ql_del_hw_if
* Function: Destroys the hardware specific entities corresponding to an
@ -2879,7 +2850,6 @@ ql_del_hw_if(qla_host_t *ha)
if (ha->hw.flags.init_intr_cnxt) {
for (i = 0; i < ha->hw.num_sds_rings; ) {
if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
num_msix = Q8_MAX_INTR_VECTORS;
else
@ -2947,16 +2917,13 @@ ql_init_hw_if(qla_host_t *ha)
}
for (i = 0; i < ha->hw.num_sds_rings; ) {
if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
num_msix = Q8_MAX_INTR_VECTORS;
else
num_msix = ha->hw.num_sds_rings - i;
if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
if (i > 0) {
num_msix = i;
for (i = 0; i < num_msix; ) {
@ -3225,9 +3192,7 @@ qla_init_rcv_cntxt(qla_host_t *ha)
ha->hw.flags.init_rx_cnxt = 1;
if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
max_idx = MAX_RCNTXT_SDS_RINGS;
else
@ -3242,9 +3207,7 @@ qla_init_rcv_cntxt(qla_host_t *ha)
}
if (hw->num_rds_rings > 1) {
for (i = 0; i < hw->num_rds_rings; ) {
if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
max_idx = MAX_SDS_TO_RDS_MAP;
else
@ -3282,7 +3245,6 @@ qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
add_rcv->cntxt_id = hw->rcv_cntxt_id;
for (i = 0; i < nsds; i++) {
j = i + sds_idx;
add_rcv->sds[i].paddr =
@ -3293,7 +3255,6 @@ qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
}
for (i = 0; (i < nsds); i++) {
@ -3312,7 +3273,6 @@ qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
qla_host_to_le32(NUM_RX_DESCRIPTORS);
}
if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
(sizeof (q80_rq_add_rcv_rings_t) >> 2),
ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
@ -3360,14 +3320,12 @@ qla_del_rcv_cntxt(qla_host_t *ha)
return;
if (ha->hw.flags.bcast_mac) {
bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
return;
ha->hw.flags.bcast_mac = 0;
}
if (ha->hw.flags.unicast_mac) {
@ -3490,7 +3448,6 @@ qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
return (0);
}
/*
* Name: qla_del_xmt_cntxt
* Function: Destroys the Transmit Context.
@ -3587,7 +3544,6 @@ qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
(ha->hw.mcast[i].addr[3] != 0) ||
(ha->hw.mcast[i].addr[4] != 0) ||
(ha->hw.mcast[i].addr[5] != 0)) {
bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
mcast = mcast + ETHER_ADDR_LEN;
count++;
@ -3671,14 +3627,12 @@ qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
int i;
for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
if ((ha->hw.mcast[i].addr[0] == 0) &&
(ha->hw.mcast[i].addr[1] == 0) &&
(ha->hw.mcast[i].addr[2] == 0) &&
(ha->hw.mcast[i].addr[3] == 0) &&
(ha->hw.mcast[i].addr[4] == 0) &&
(ha->hw.mcast[i].addr[5] == 0)) {
bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
ha->hw.nmcast++;
@ -3688,7 +3642,6 @@ qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
if (nmcast == 0)
break;
}
}
return 0;
}
@ -3700,7 +3653,6 @@ qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
ha->hw.mcast[i].addr[0] = 0;
ha->hw.mcast[i].addr[1] = 0;
ha->hw.mcast[i].addr[2] = 0;
@ -3812,7 +3764,6 @@ ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
while (comp_idx != hw_tx_cntxt->txr_comp) {
txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
hw_tx_cntxt->txr_comp++;
@ -3845,7 +3796,7 @@ ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
("%s [%d]: txr_idx = %d txr_free = %d txr_next = %d txr_comp = %d\n",\
__func__, __LINE__, txr_idx, hw_tx_cntxt->txr_free, \
hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp));
return;
}
@ -3917,7 +3868,6 @@ ql_hw_check_health(qla_host_t *ha)
ha->hw.hbeat_failure++;
if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
device_printf(ha->pci_dev, "%s: Heartbeat Failue 1[0x%08x]\n",
__func__, val);
@ -4182,7 +4132,6 @@ qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
return (0);
}
static int
qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
{
@ -4211,7 +4160,6 @@ qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
if (qla_mbx_cmd(ha, (uint32_t *) md_size,
(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
device_printf(dev, "%s: failed\n", __func__);
return (-1);
@ -4301,7 +4249,6 @@ ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
}
if (etype == ETHERTYPE_IP) {
offset = (hdrlen + sizeof (struct ip));
if (mp->m_len >= offset) {
@ -4312,10 +4259,9 @@ ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
}
if (ip->ip_p == IPPROTO_TCP) {
hdrlen += ip->ip_hl << 2;
offset = hdrlen + 4;
if (mp->m_len >= offset) {
th = (struct tcphdr *)(mp->m_data + hdrlen);
} else {
@ -4325,7 +4271,6 @@ ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
}
} else if (etype == ETHERTYPE_IPV6) {
offset = (hdrlen + sizeof (struct ip6_hdr));
if (mp->m_len >= offset) {
@ -4336,7 +4281,6 @@ ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
}
if (ip6->ip6_nxt == IPPROTO_TCP) {
hdrlen += sizeof(struct ip6_hdr);
offset = hdrlen + 4;
@ -4397,7 +4341,6 @@ ql_get_minidump_template(qla_host_t *ha)
(sizeof(q80_config_md_templ_cmd_t) >> 2),
ha->hw.mbox,
(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
device_printf(dev, "%s: failed\n", __func__);
return (-1);
@ -4471,7 +4414,6 @@ static uint32_t ql_cntrl(qla_host_t *ha,
ql_minidump_template_hdr_t *template_hdr,
ql_minidump_entry_cntrl_t *crbEntry);
static uint32_t
ql_minidump_size(qla_host_t *ha)
{
@ -4562,7 +4504,6 @@ ql_alloc_minidump_buffers(qla_host_t *ha)
return (ret);
}
static uint32_t
ql_validate_minidump_checksum(qla_host_t *ha)
{
@ -4611,7 +4552,6 @@ ql_minidump_init(qla_host_t *ha)
#ifdef QL_LDFLASH_FW
if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
return (-1);
@ -4628,11 +4568,9 @@ ql_minidump_init(qla_host_t *ha)
#endif /* #ifdef QL_LDFLASH_FW */
if (ret == 0) {
ret = ql_validate_minidump_checksum(ha);
if (ret == 0) {
ret = ql_alloc_minidump_buffers(ha);
if (ret == 0)
@ -4689,7 +4627,7 @@ ql_minidump(qla_host_t *ha)
ha->hw.mdump_template_size);
ql_parse_template(ha);
ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
ha->hw.mdump_done = 1;
@ -4697,7 +4635,6 @@ ql_minidump(qla_host_t *ha)
return;
}
/*
* helper routines
*/
@ -4711,7 +4648,6 @@ ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
return;
}
static int
ql_parse_template(qla_host_t *ha)
{
@ -4729,7 +4665,7 @@ ql_parse_template(qla_host_t *ha)
if (template_hdr->entry_type == TLHDR)
sane_start = 1;
dump_buff = (char *) ha->hw.mdump_buffer;
num_of_entries = template_hdr->num_of_entries;
@ -4752,14 +4688,12 @@ ql_parse_template(qla_host_t *ha)
__func__, sane_start, num_of_entries, capture_mask, dump_size));
for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
/*
* If the capture_mask of the entry does not match capture mask
* skip the entry after marking the driver_flags indicator.
*/
if (!(entry->hdr.entry_capture_mask & capture_mask)) {
entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
entry = (ql_minidump_entry_t *) ((char *) entry
+ entry->hdr.entry_size);
@ -4907,7 +4841,7 @@ ql_parse_template(qla_host_t *ha)
"\n%s: Template configuration error. Check Template\n",
__func__);
}
QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
__func__, template_hdr->num_of_entries));
@ -4930,7 +4864,6 @@ ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
stride = crb_entry->addr_stride;
for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
ret = ql_rdwr_indreg32(ha, addr, &value, 1);
if (ret)
@ -4978,7 +4911,6 @@ ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
read_cnt = cacheEntry->read_addr_cnt;
for (i = 0; i < loop_cnt; i++) {
ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
if (ret)
return (0);
@ -5002,7 +4934,6 @@ ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
cntl_value_r = (uint8_t)data;
while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
if (timeout) {
qla_mdelay(__func__, 1);
timeout--;
@ -5030,7 +4961,6 @@ ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
addr = read_addr;
for (k = 0; k < read_cnt; k++) {
ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
if (ret)
return (0);
@ -5075,7 +5005,6 @@ ql_L1Cache(qla_host_t *ha,
read_cnt = cacheEntry->read_addr_cnt;
for (i = 0; i < loop_cnt; i++) {
ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
if (ret)
return (0);
@ -5086,7 +5015,6 @@ ql_L1Cache(qla_host_t *ha,
addr = read_addr;
for (k = 0; k < read_cnt; k++) {
ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
if (ret)
return (0);
@ -5145,7 +5073,6 @@ ql_rdmem(qla_host_t *ha,
loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
for (i = 0; i < loop_cnt; i++) {
ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
if (ret)
return (0);
@ -5180,7 +5107,6 @@ ql_rdrom(qla_host_t *ha,
loop_cnt /= sizeof(value);
for (i = 0; i < loop_cnt; i++) {
ret = ql_rd_flash32(ha, addr, &value);
if (ret)
return (0);
@ -5211,7 +5137,6 @@ ql_rdmux(qla_host_t *ha,
read_addr = muxEntry->read_addr;
for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
if (ret)
return (0);
@ -5253,7 +5178,6 @@ ql_rdmux2(qla_host_t *ha,
for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
loop_cnt++) {
uint32_t temp_sel_val;
ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
@ -5318,7 +5242,6 @@ ql_rdqueue(qla_host_t *ha,
for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
loop_cnt++) {
ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
if (ret)
return (0);
@ -5326,7 +5249,6 @@ ql_rdqueue(qla_host_t *ha,
read_addr = queueEntry->read_addr;
for (k = 0; k < read_cnt; k++) {
ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
if (ret)
return (0);
@ -5361,7 +5283,6 @@ ql_cntrl(qla_host_t *ha,
opcode = crbEntry->opcode;
if (opcode & QL_DBG_OPCODE_WR) {
ret = ql_rdwr_indreg32(ha, entry_addr,
&crbEntry->value_1, 0);
if (ret)
@ -5371,7 +5292,6 @@ ql_cntrl(qla_host_t *ha,
}
if (opcode & QL_DBG_OPCODE_RW) {
ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
if (ret)
return (0);
@ -5384,7 +5304,6 @@ ql_cntrl(qla_host_t *ha,
}
if (opcode & QL_DBG_OPCODE_AND) {
ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
if (ret)
return (0);
@ -5403,7 +5322,6 @@ ql_cntrl(qla_host_t *ha,
}
if (opcode & QL_DBG_OPCODE_OR) {
ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
if (ret)
return (0);
@ -5418,7 +5336,6 @@ ql_cntrl(qla_host_t *ha,
}
if (opcode & QL_DBG_OPCODE_POLL) {
opcode &= ~QL_DBG_OPCODE_POLL;
timeout = crbEntry->poll_timeout;
addr = entry_addr;
@ -5429,7 +5346,6 @@ ql_cntrl(qla_host_t *ha,
while ((read_value & crbEntry->value_2)
!= crbEntry->value_1) {
if (timeout) {
qla_mdelay(__func__, 1);
timeout--;
@ -5555,7 +5471,6 @@ ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
data_size = entry->data_size;
for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
if (ret)
return (0);
@ -5563,7 +5478,6 @@ ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
wait_count = 0;
while (wait_count < poll) {
uint32_t temp;
ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
@ -5600,7 +5514,6 @@ ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
return (loop_cnt * (2 * sizeof(uint32_t)));
}
/*
* Handling rd modify write poll entry.
*/
@ -5625,14 +5538,12 @@ ql_pollrd_modify_write(qla_host_t *ha,
modify_mask = entry->modify_mask;
data_size = entry->data_size;
ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
if (ret)
return (0);
wait_count = 0;
while (wait_count < poll) {
uint32_t temp;
ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
@ -5649,7 +5560,6 @@ ql_pollrd_modify_write(qla_host_t *ha,
device_printf(ha->pci_dev, "%s Error in processing entry\n",
__func__);
} else {
ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
if (ret)
return (0);
@ -5667,7 +5577,6 @@ ql_pollrd_modify_write(qla_host_t *ha,
/* Poll again */
wait_count = 0;
while (wait_count < poll) {
uint32_t temp;
ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
@ -5688,5 +5597,3 @@ ql_pollrd_modify_write(qla_host_t *ha,
*/
return (2 * sizeof(uint32_t));
}

View File

@ -266,8 +266,6 @@
#define Q8_MBX_GET_PORT_CONFIG 0x0067
#define Q8_MBX_GET_LINK_STATUS 0x0068
/*
* Mailbox Command Response
*/
@ -925,7 +923,6 @@ typedef struct _q80_idc_ack_rsp {
uint16_t regcnt_status;
} __packed q80_idc_ack_rsp_t;
/*
* Set Port Configuration command
* Used to set Ethernet Standard Pause values
@ -1083,7 +1080,6 @@ typedef struct _q80_get_link_status_rsp {
} __packed q80_get_link_status_rsp_t;
/*
* Transmit Related Definitions
*/
@ -1209,7 +1205,6 @@ typedef struct _q80_tx_cmd {
#define Q8_TX_CMD_TSO_ALIGN 2
#define Q8_TX_MAX_NON_TSO_SEGS 62
/*
* Receive Related Definitions
*/
@ -1224,7 +1219,6 @@ typedef struct _q80_tx_cmd {
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
#define MAX_RDS_RINGS MAX_SDS_RINGS /* Max# of Rcv Descriptor Rings */
typedef struct _q80_rq_sds_ring {
uint64_t paddr; /* physical addr of status ring in system memory */
uint64_t hdr_split1;
@ -1310,7 +1304,6 @@ typedef struct _q80_rcv_cntxt_destroy_rsp {
uint16_t regcnt_status;
} __packed q80_rcv_cntxt_destroy_rsp_t;
/*
* Add Receive Rings
*/
@ -1355,7 +1348,6 @@ typedef struct _q80_rq_map_sds_to_rds {
q80_sds_rds_map_e_t sds_rds[MAX_SDS_TO_RDS_MAP];
} __packed q80_rq_map_sds_to_rds_t;
typedef struct _q80_rsp_map_sds_to_rds {
uint16_t opcode;
uint16_t regcnt_status;
@ -1364,7 +1356,6 @@ typedef struct _q80_rsp_map_sds_to_rds {
q80_sds_rds_map_e_t sds_rds[MAX_SDS_TO_RDS_MAP];
} __packed q80_rsp_map_sds_to_rds_t;
/*
* Receive Descriptor corresponding to each entry in the receive ring
*/
@ -1434,7 +1425,6 @@ typedef struct _q80_stat_desc {
#define Q8_SGL_LRO_STAT_TS(data) ((data >> 40) & 0x1)
#define Q8_SGL_LRO_STAT_PUSH_BIT(data) ((data >> 41) & 0x1)
/*
* definitions specific to opcode 0x05 data[1]
*/
@ -1463,7 +1453,6 @@ typedef struct _q80_stat_desc {
#define NUM_TX_DESCRIPTORS 1024
#define NUM_STATUS_DESCRIPTORS 1024
#define NUM_RX_DESCRIPTORS 2048
/*
@ -1608,7 +1597,6 @@ typedef struct _qla_hw {
fdt_valid :1;
} flags;
volatile uint16_t link_speed;
volatile uint16_t cable_length;
volatile uint32_t cable_oui;
@ -1630,7 +1618,7 @@ typedef struct _qla_hw {
uint32_t num_tx_rings;
qla_dmabuf_t dma_buf;
/* Transmit Side */
qla_hw_tx_cntxt_t tx_cntxt[NUM_TX_RINGS];
@ -1679,7 +1667,7 @@ typedef struct _qla_hw {
uint32_t max_tx_segs;
uint32_t min_lro_pkt_size;
uint32_t enable_hw_lro;
uint32_t enable_soft_lro;
uint32_t enable_9kb;
@ -1740,7 +1728,6 @@ typedef struct _qla_hw {
#define QL_BUFFER_ALIGN 16
/*
* Flash Configuration
*/

View File

@ -35,10 +35,8 @@
#ifndef _QL_INLINE_H_
#define _QL_INLINE_H_
#define QL8_SEMLOCK_TIMEOUT 1000/* QLA8020 Semaphore Lock Timeout 10ms */
/*
* Inline functions for hardware semaphores
*/

View File

@ -34,7 +34,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
@ -104,14 +103,12 @@ ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
qla_offchip_mem_val_t *mem;
} u;
if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
return ENXIO;
pci_dev= ha->pci_dev;
switch(cmd) {
case QLA_RDWR_REG:
u.rv = (qla_reg_val_t *)data;
@ -254,7 +251,6 @@ ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
fw_dump->saved = 1;
if (ha->offline) {
if (ha->enable_minidump)
ql_minidump(ha);
@ -268,7 +264,6 @@ ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
break;
}
} else {
#define QLA_LOCK_MDUMP_MS_TIMEOUT (QLA_LOCK_DEFAULT_MS_TIMEOUT * 5)
if (QLA_LOCK(ha, __func__, QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
if (!ha->hw.mdump_done) {
@ -284,7 +279,7 @@ ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
rval = ENXIO;
break;
}
#define QLNX_DUMP_WAIT_SECS 30
count = QLNX_DUMP_WAIT_SECS * 1000;
@ -354,8 +349,6 @@ ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
return rval;
}
static int
ql_drvr_state(qla_host_t *ha, qla_driver_state_t *state)
{
@ -474,7 +467,7 @@ ql_capture_drvr_state(qla_host_t *ha)
return;
hdr = (qla_drvr_state_hdr_t *)state_buffer;
hdr->saved = 0;
if (hdr->drvr_version_major) {
@ -667,4 +660,3 @@ ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log)
return (rval);
}

View File

@ -281,5 +281,4 @@ typedef struct qla_sp_log qla_sp_log_t;
#define SP_TLOG_FMT_STR_14 \
"qla_init_locked [%ld]: \n"
#endif /* #ifndef _QL_IOCTL_H_ */

View File

@ -35,7 +35,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
@ -54,7 +53,6 @@ qla_rcv_error(qla_host_t *ha)
QL_INITIATE_RECOVERY(ha);
}
/*
* Name: qla_rx_intr
* Function: Handles normal ethernet frames received
@ -76,12 +74,12 @@ qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
if (ha->hw.num_rds_rings > 1)
r_idx = sds_idx;
ha->hw.rds[r_idx].count++;
sdsp = &ha->hw.sds[sds_idx];
rx_ring = &ha->rx_ring[r_idx];
for (i = 0; i < sgc->num_handles; i++) {
rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
@ -112,7 +110,7 @@ qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
rxb->next = sdsp->rxb_free;
sdsp->rxb_free = rxb;
sdsp->rx_free++;
if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
/* log the error */
device_printf(ha->pci_dev,
@ -176,7 +174,6 @@ qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
#endif /* #if __FreeBSD_version >= 1100000 */
if (ha->hw.enable_soft_lro) {
#if (__FreeBSD_version >= 1100101)
tcp_lro_queue_mbuf(lro, mpf);
@ -187,7 +184,6 @@ qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
#endif /* #if (__FreeBSD_version >= 1100101) */
} else {
(*ifp->if_input)(ifp, mpf);
}
@ -227,11 +223,11 @@ qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
ha->hw.rds[r_idx].count++;
rx_ring = &ha->rx_ring[r_idx];
ha->hw.rds[r_idx].lro_pkt_count++;
sdsp = &ha->hw.sds[sds_idx];
pkt_length = sgc->payload_length + sgc->l4_offset;
if (sgc->flags & Q8_LRO_COMP_TS) {
@ -271,7 +267,7 @@ qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
rxb->next = sdsp->rxb_free;
sdsp->rxb_free = rxb;
sdsp->rx_free++;
if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
/* log the error */
device_printf(ha->pci_dev,
@ -325,7 +321,7 @@ qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
if (etype == ETHERTYPE_IP) {
ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);
iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
sgc->payload_length;
@ -408,7 +404,6 @@ qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
num_handles = -1;
switch (num_handles) {
case 1:
*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
break;
@ -512,7 +507,6 @@ ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
comp_idx = hw->sds[sds_idx].sdsr_next;
while (count-- && !ha->stop_rcv) {
sdesc = (q80_stat_desc_t *)
&hw->sds[sds_idx].sds_ring_base[comp_idx];
@ -522,7 +516,6 @@ ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
break;
switch (opcode) {
case Q8_STAT_DESC_OPCODE_RCV_PKT:
desc_count = 1;
@ -714,7 +707,6 @@ ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
sds_replenish_threshold += desc_count;
while (desc_count--) {
sdesc->data[0] = 0ULL;
sdesc->data[1] = 0ULL;
@ -752,7 +744,6 @@ ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
}
#endif /* #if (__FreeBSD_version >= 1100101) */
}
if (ha->stop_rcv)
@ -811,7 +802,6 @@ ql_mbx_isr(void *arg)
data = data & 0xFFFF;
switch (data) {
case 0x8001: /* It's an AEN */
ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
@ -840,7 +830,6 @@ ql_mbx_isr(void *arg)
if_link_state_change(ha->ifp, LINK_STATE_DOWN);
}
ha->hw.module_type = ((data >> 8) & 0xFF);
ha->hw.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
ha->hw.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
@ -922,7 +911,6 @@ ql_mbx_isr(void *arg)
return;
}
static void
qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
{
@ -1009,4 +997,3 @@ ql_isr(void *arg)
return;
}

View File

@ -10964,4 +10964,3 @@ unsigned char ql83xx_minidump[] = {
0x00, 0x00, 0x00, 0x00, 0x34, 0x39, 0x7b, 0xbf
};
unsigned int ql83xx_minidump_len = 131072;

View File

@ -37,7 +37,6 @@
#define QL_DBG_CAP_SIZE_ARRAY_LEN 8
#define QL_NO_OF_OCM_WINDOWS 16
typedef struct ql_mdump_tmplt_hdr {
uint32_t entry_type ;
uint32_t first_entry_offset ;
@ -251,7 +250,6 @@ typedef struct ql_minidump_entry_cache_s {
} ;
} ql_minidump_entry_cache_t ;
/*
* Read OCM Entry Header
*/
@ -467,4 +465,3 @@ typedef struct ql_minidump_entry_rd_modify_wr_with_poll_s {
} ql_minidump_entry_rd_modify_wr_with_poll_t;
#endif /* #ifndef _QL_MINIDUMP_H_ */

View File

@ -103,7 +103,6 @@ ql_rdwr_offchip_mem(qla_host_t *ha, uint64_t addr, q80_offchip_mem_val_t *val,
uint32_t count = 100;
uint32_t data, step = 0;
if (QL_ERR_INJECT(ha, INJCT_RDWR_OFFCHIPMEM_FAILURE))
goto exit_ql_rdwr_offchip_mem;
@ -203,7 +202,7 @@ ql_rdwr_offchip_mem(qla_host_t *ha, uint64_t addr, q80_offchip_mem_val_t *val,
} else
qla_mdelay(__func__, 1);
}
exit_ql_rdwr_offchip_mem:
device_printf(ha->pci_dev,
@ -642,7 +641,7 @@ ql_wr_flash_buffer(qla_host_t *ha, uint32_t off, uint32_t size, void *buf)
if (buf == NULL)
return -1;
if ((data = malloc(size, M_QLA83XXBUF, M_NOWAIT)) == NULL) {
device_printf(ha->pci_dev, "%s: malloc failed \n", __func__);
rval = -1;
@ -726,7 +725,7 @@ qla_init_from_flash(qla_host_t *ha)
uint32_t data;
qla_ld_fw_init(ha);
do {
data = READ_REG32(ha, Q8_CMDPEG_STATE);
@ -761,9 +760,7 @@ ql_init_hw(qla_host_t *ha)
QL_DPRINT1(ha, (dev, "%s: enter\n", __func__));
if (ha->pci_func & 0x1) {
while ((ha->pci_func & 0x1) && delay--) {
val = READ_REG32(ha, Q8_CMDPEG_STATE);
if (val == 0xFF01) {
@ -779,7 +776,6 @@ ql_init_hw(qla_host_t *ha)
goto ql_init_hw_exit;
}
val = READ_REG32(ha, Q8_CMDPEG_STATE);
if (!cold || (val != 0xFF01) || ha->qla_initiate_recovery) {
ret = qla_init_from_flash(ha);
@ -827,7 +823,7 @@ ql_read_mac_addr(qla_host_t *ha)
ha->hw.mac_addr[4] = macp[1];
ha->hw.mac_addr[3] = macp[2];
ha->hw.mac_addr[2] = macp[3];
macp = (uint8_t *)&mac_hi;
ha->hw.mac_addr[1] = macp[0];
ha->hw.mac_addr[0] = macp[1];
@ -868,7 +864,6 @@ qla_wr_list(qla_host_t *ha, q8_ce_hdr_t *ce_hdr)
wr_l = (q8_wrl_e_t *)((uint8_t *)ce_hdr + sizeof (q8_ce_hdr_t));
for (i = 0; i < ce_hdr->opcount; i++, wr_l++) {
if (ql_rdwr_indreg32(ha, wr_l->addr, &wr_l->value, 0)) {
device_printf(ha->pci_dev,
"%s: [0x%08x 0x%08x] error\n", __func__,
@ -892,7 +887,6 @@ qla_rd_wr_list(qla_host_t *ha, q8_ce_hdr_t *ce_hdr)
rd_wr_l = (q8_rdwrl_e_t *)((uint8_t *)ce_hdr + sizeof (q8_ce_hdr_t));
for (i = 0; i < ce_hdr->opcount; i++, rd_wr_l++) {
if (ql_rdwr_indreg32(ha, rd_wr_l->rd_addr, &data, 1)) {
device_printf(ha->pci_dev, "%s: [0x%08x] error\n",
__func__, rd_wr_l->rd_addr);
@ -920,7 +914,6 @@ qla_poll_reg(qla_host_t *ha, uint32_t addr, uint32_t ms_to, uint32_t tmask,
uint32_t data;
while (ms_to) {
if (ql_rdwr_indreg32(ha, addr, &data, 1)) {
device_printf(ha->pci_dev, "%s: [0x%08x] error\n",
__func__, addr);
@ -960,7 +953,6 @@ qla_poll_list(qla_host_t *ha, q8_ce_hdr_t *ce_hdr)
break;
if (qla_poll_reg(ha, pe->addr, ce_hdr->delay_to,
phdr->tmask, phdr->tvalue)) {
if (ql_rdwr_indreg32(ha, pe->to_addr, &data,
1)) {
device_printf(ha->pci_dev,
@ -992,7 +984,6 @@ qla_poll_write_list(qla_host_t *ha, q8_ce_hdr_t *ce_hdr)
wr_e = (q8_poll_wr_e_t *)((uint8_t *)phdr + sizeof(q8_poll_hdr_t));
for (i = 0; i < ce_hdr->opcount; i++, wr_e++) {
if (ql_rdwr_indreg32(ha, wr_e->dr_addr, &wr_e->dr_value, 0)) {
device_printf(ha->pci_dev,
"%s: [0x%08x 0x%08x] error\n", __func__,
@ -1109,7 +1100,6 @@ qla_read_modify_write_list(qla_host_t *ha, q8_ce_hdr_t *ce_hdr)
sizeof(q8_rdmwr_hdr_t));
for (i = 0; i < ce_hdr->opcount; i++, rdmwr_e++) {
if (qla_rdmwr(ha, rdmwr_e->rd_addr, rdmwr_e->wr_addr,
rdmwr_hdr)) {
return -1;
@ -1238,12 +1228,10 @@ qla_load_offchip_mem(qla_host_t *ha, uint64_t addr, uint32_t *data32,
default:
break;
}
return ret;
}
static int
qla_load_bootldr(qla_host_t *ha)
{
@ -1297,7 +1285,6 @@ qla_ld_fw_init(qla_host_t *ha)
__func__);
return -1;
}
buf = ql83xx_resetseq + hdr->stop_seq_off;
@ -1410,7 +1397,6 @@ ql_start_sequence(qla_host_t *ha, uint16_t index)
WRITE_REG32(ha, Q8_FW_IMAGE_VALID, 0x12345678);
#endif /* #ifdef QL_LDFLASH_FW */
index = end_idx;
buf = ql83xx_resetseq + hdr->start_seq_off;
@ -1422,4 +1408,3 @@ ql_start_sequence(qla_host_t *ha, uint16_t index)
return (0);
}

View File

@ -35,7 +35,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
@ -128,7 +127,6 @@ MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");
#define QL_STD_REPLENISH_THRES 0
#define QL_JUMBO_REPLENISH_THRES 32
static char dev_str[64];
static char ver_str[64];
@ -277,7 +275,6 @@ qla_watchdog(void *arg)
if (!ha->offline &&
(ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
(ha->msg_from_peer == QL_PEER_MSG_RESET))) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
ql_update_link_state(ha);
@ -298,7 +295,6 @@ qla_watchdog(void *arg)
} else {
if (ha->qla_interface_up) {
ha->watchdog_ticks++;
if (ha->watchdog_ticks > 1000)
@ -313,7 +309,6 @@ qla_watchdog(void *arg)
taskqueue_enqueue(ha->async_event_tq,
&ha->async_event_task);
}
}
ha->qla_watchdog_paused = 0;
}
@ -580,7 +575,6 @@ qla_pci_detach(device_t dev)
qla_host_t *ha = NULL;
struct ifnet *ifp;
if ((ha = device_get_softc(dev)) == NULL) {
device_printf(dev, "cannot get softc\n");
return (ENOMEM);
@ -686,7 +680,6 @@ qla_release(qla_host_t *ha)
ha->mbx_irq);
for (i = 0; i < ha->hw.num_sds_rings; i++) {
if (ha->irq_vec[i].handle) {
(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
ha->irq_vec[i].handle);
@ -839,7 +832,7 @@ qla_alloc_parent_dma_tag(qla_host_t *ha)
}
ha->flags.parent_tag = 1;
return (0);
}
@ -1008,7 +1001,6 @@ qla_set_multi(qla_host_t *ha, uint32_t add_multi)
add_multi, (uint32_t)mcnt, 0);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
if (!add_multi) {
ret = qla_hw_del_all_mcast(ha);
@ -1020,7 +1012,6 @@ qla_set_multi(qla_host_t *ha, uint32_t add_multi)
if (!ret)
ret = ql_hw_set_multi(ha, mta, mcnt, 1);
}
QLA_UNLOCK(ha, __func__);
@ -1046,7 +1037,6 @@ qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
__func__, cmd));
if (ifa->ifa_addr->sa_family == AF_INET) {
ret = QLA_LOCK(ha, __func__,
QLA_LOCK_DEFAULT_MS_TIMEOUT,
QLA_LOCK_NO_SLEEP);
@ -1126,7 +1116,6 @@ qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
ha->if_flags, ifp->if_flags, 0);
if (ifp->if_flags & IFF_UP) {
ha->max_frame_size = ifp->if_mtu +
ETHER_HDR_LEN + ETHER_CRC_LEN;
qla_init_locked(ha);
@ -1207,7 +1196,6 @@ qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
qla_init_locked(ha);
QLA_UNLOCK(ha, __func__);
}
VLAN_CAPABILITIES(ifp);
break;
@ -1255,7 +1243,7 @@ qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
ql_update_link_state(ha);
if (ha->hw.link_up) {
ifmr->ifm_status |= IFM_ACTIVE;
@ -1268,7 +1256,6 @@ qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
return;
}
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
uint32_t iscsi_pdu)
@ -1307,7 +1294,6 @@ qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
BUS_DMA_NOWAIT);
if (ret == EFBIG) {
struct mbuf *m;
QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
@ -1328,7 +1314,6 @@ qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
segs, &nsegs, BUS_DMA_NOWAIT))) {
ha->err_tx_dmamap_load++;
device_printf(ha->pci_dev,
@ -1343,7 +1328,6 @@ qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
}
} else if (ret) {
ha->err_tx_dmamap_load++;
device_printf(ha->pci_dev,
@ -1405,9 +1389,7 @@ qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
struct ifnet *ifp = ha->ifp;
if (mtx_initialized(&fp->tx_mtx)) {
if (fp->tx_br != NULL) {
mtx_lock(&fp->tx_mtx);
while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
@ -1468,7 +1450,6 @@ qla_fp_taskqueue(void *context, int pending)
mp = drbr_peek(ifp, fp->tx_br);
while (mp != NULL) {
if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
if (ql_iscsi_pdu(ha, mp) == 0) {
@ -1536,7 +1517,6 @@ qla_create_fp_taskqueues(qla_host_t *ha)
uint8_t tq_name[32];
for (i = 0; i < ha->hw.num_sds_rings; i++) {
qla_tx_fp_t *fp = &ha->tx_fp[i];
bzero(tq_name, sizeof (tq_name));
@ -1567,7 +1547,6 @@ qla_destroy_fp_taskqueues(qla_host_t *ha)
int i;
for (i = 0; i < ha->hw.num_sds_rings; i++) {
qla_tx_fp_t *fp = &ha->tx_fp[i];
if (fp->fp_taskqueue != NULL) {
@ -1646,7 +1625,6 @@ qla_qflush(struct ifnet *ifp)
QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
for (i = 0; i < ha->hw.num_sds_rings; i++) {
fp = &ha->tx_fp[i];
if (fp == NULL)
@ -1743,12 +1721,10 @@ qla_alloc_xmt_bufs(qla_host_t *ha)
for (j = 0; j < ha->hw.num_tx_rings; j++) {
for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
txb = &ha->tx_ring[j].tx_buf[i];
if ((ret = bus_dmamap_create(ha->tx_tag,
BUS_DMA_NOWAIT, &txb->map))) {
ha->err_tx_dmamap_create++;
device_printf(ha->pci_dev,
"%s: bus_dmamap_create failed[%d]\n",
@ -1816,7 +1792,6 @@ qla_free_xmt_bufs(qla_host_t *ha)
return;
}
static int
qla_alloc_rcv_std(qla_host_t *ha)
{
@ -1825,11 +1800,9 @@ qla_alloc_rcv_std(qla_host_t *ha)
qla_rx_ring_t *rx_ring;
for (r = 0; r < ha->hw.num_rds_rings; r++) {
rx_ring = &ha->rx_ring[r];
for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
rxb = &rx_ring->rx_buf[i];
ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
@ -1860,9 +1833,7 @@ qla_alloc_rcv_std(qla_host_t *ha)
qla_init_hw_rcv_descriptors(ha);
for (r = 0; r < ha->hw.num_rds_rings; r++) {
rx_ring = &ha->rx_ring[r];
for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
@ -1929,7 +1900,6 @@ qla_alloc_rcv_bufs(qla_host_t *ha)
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&ha->rx_tag)) {
device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
__func__);
@ -1992,7 +1962,6 @@ ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
mbuf_size = MCLBYTES;
if (mp == NULL) {
if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
return(-1);
@ -2047,7 +2016,6 @@ ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
return (ret);
}
static void
qla_get_peer(qla_host_t *ha)
{
@ -2074,10 +2042,9 @@ static void
qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
{
qla_host_t *ha_peer;
if (ha->peer_dev) {
if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
ha_peer->msg_from_peer = msg_to_peer;
}
}
@ -2118,7 +2085,6 @@ qla_error_recovery(void *context, int pending)
__func__, qla_get_usec_timestamp());
if (ha->qla_interface_up) {
qla_mdelay(__func__, 300);
//ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
@ -2141,7 +2107,6 @@ qla_error_recovery(void *context, int pending)
qla_drain_fp_taskqueues(ha);
if ((ha->pci_func & 0x1) == 0) {
if (!ha->msg_from_peer) {
qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
@ -2176,7 +2141,6 @@ qla_error_recovery(void *context, int pending)
} else {
if (ha->msg_from_peer == QL_PEER_MSG_RESET) {
ha->msg_from_peer = 0;
if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
@ -2220,7 +2184,6 @@ qla_error_recovery(void *context, int pending)
ha->qla_initiate_recovery = 0;
if (ha->qla_interface_up) {
if (qla_alloc_xmt_bufs(ha) != 0) {
ha->offline = 1;
goto qla_error_recovery_exit;
@ -2261,7 +2224,6 @@ qla_error_recovery(void *context, int pending)
ha->hw.sp_log_stop = -1;
}
QLA_UNLOCK(ha, __func__);
if (!ha->offline)
@ -2303,4 +2265,3 @@ qla_stats(void *context, int pending)
return;
}

View File

@ -154,13 +154,13 @@ MALLOC_DECLARE(M_QLA83XXBUF);
else \
pause(fn, qla_ms_to_hz(msecs)); \
}
/*
* Locks
*/
#define QLA_LOCK(ha, str, to_ms, no_sleep) qla_lock(ha, str, to_ms, no_sleep)
#define QLA_UNLOCK(ha, str) qla_unlock(ha, str)
/*
* structure encapsulating a DMA buffer
*/
@ -174,5 +174,4 @@ struct qla_dma {
};
typedef struct qla_dma qla_dma_t;
#endif /* #ifndef _QL_OS_H_ */

View File

@ -1409,4 +1409,3 @@ unsigned char ql83xx_resetseq[] = {
0xdf, 0xfa, 0x8f, 0x87
};
unsigned int ql83xx_resetseq_len = 16384;

View File

@ -35,7 +35,6 @@
#ifndef _QL_TMPLT_H_
#define _QL_TMPLT_H_
typedef struct _q8_tmplt_hdr {
uint16_t version;
uint16_t signature;
@ -47,7 +46,6 @@ typedef struct _q8_tmplt_hdr {
uint16_t start_seq_off;
} __packed q8_tmplt_hdr_t;
typedef struct _q8_ce_hdr {
uint16_t opcode;
uint16_t size;