net/hns3: remove some blank lines

According to the rule of the static check tools
that blank lines should be arranged properly to
keep the code compact, remove some unnecessary
blank lines to fix the corresponding rule warnings.

Signed-off-by: Lijun Ou <oulijun@huawei.com>
This commit is contained in:
Lijun Ou 2020-11-09 22:29:02 +08:00 committed by Ferruh Yigit
parent 80ec1bbd5b
commit ee1607167b
5 changed files with 15 additions and 31 deletions

View File

@ -198,7 +198,6 @@ hns3_cmd_csq_clean(struct hns3_hw *hw)
int clean;
head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
if (!is_valid_csq_clean_head(csq, head)) {
hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
csq->next_to_use, csq->next_to_clean);

View File

@ -238,7 +238,6 @@ hns3_interrupt_handler(void *param)
hns3_pf_disable_irq0(hw);
event_cause = hns3_check_event_cause(hns, &clearval);
/* vector 0 interrupt is shared with reset and mailbox source events. */
if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
hns3_warn(hw, "Received err interrupt");
@ -3556,9 +3555,7 @@ hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
priv = &buf_alloc->priv_buf[i];
mask = BIT((uint8_t)i);
if (hw->hw_tc_map & mask &&
hw->dcb_info.hw_pfc_map & mask) {
if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) {
/* Reduce the number of pfc TC with private buffer */
priv->wl.low = 0;
priv->enable = 0;
@ -3612,7 +3609,6 @@ hns3_only_alloc_priv_buff(struct hns3_hw *hw,
for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
priv->enable = 0;
priv->wl.low = 0;
priv->wl.high = 0;

View File

@ -209,8 +209,7 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
if (ret) {
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE,
rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "Read counter fail.");
return ret;
}
@ -547,7 +546,6 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (item->mask) {
ipv4_mask = item->mask;
if (ipv4_mask->hdr.total_length ||
ipv4_mask->hdr.packet_id ||
ipv4_mask->hdr.fragment_offset ||
@ -616,8 +614,8 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (item->mask) {
ipv6_mask = item->mask;
if (ipv6_mask->hdr.vtc_flow ||
ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
ipv6_mask->hdr.hop_limits) {
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_MASK,
item,
@ -672,12 +670,10 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (item->mask) {
tcp_mask = item->mask;
if (tcp_mask->hdr.sent_seq ||
tcp_mask->hdr.recv_ack ||
tcp_mask->hdr.data_off ||
tcp_mask->hdr.tcp_flags ||
tcp_mask->hdr.rx_win ||
tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
tcp_mask->hdr.tcp_urp) {
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_MASK,
item,
@ -776,7 +772,6 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
RTE_FLOW_ERROR_TYPE_ITEM_MASK,
item,
"Only support src & dst port in SCTP");
if (sctp_mask->hdr.src_port) {
hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
rule->key_conf.mask.src_port =
@ -1069,8 +1064,7 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
}
static int
hns3_parse_normal(const struct rte_flow_item *item,
struct hns3_fdir_rule *rule,
hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
struct items_step_mngr *step_mngr,
struct rte_flow_error *error)
{
@ -1331,9 +1325,8 @@ hns3_rss_conf_copy(struct hns3_rss_conf *out,
.key_len = in->key_len,
.queue_num = in->queue_num,
};
out->conf.queue =
memcpy(out->queue, in->queue,
sizeof(*in->queue) * in->queue_num);
out->conf.queue = memcpy(out->queue, in->queue,
sizeof(*in->queue) * in->queue_num);
if (in->key)
out->conf.key = memcpy(out->key, in->key, in->key_len);
@ -1783,17 +1776,15 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
if (flow == NULL) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to allocate flow memory");
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "Failed to allocate flow memory");
return NULL;
}
flow_node = rte_zmalloc("hns3 flow node",
sizeof(struct hns3_flow_mem), 0);
if (flow_node == NULL) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to allocate flow list memory");
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "Failed to allocate flow list memory");
rte_free(flow);
return NULL;
}

View File

@ -1584,7 +1584,6 @@ hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
RTE_PKTMBUF_HEADROOM);
if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
return -EINVAL;

View File

@ -679,7 +679,6 @@ hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
(*count)++;
}
}
}
void