Whitespace fixes.

MFC after:	1 week
Sponsored by:	Mellanox Technologies
Hans Petter Selasky 2016-03-04 09:07:30 +00:00
parent 05c0884ee1
commit 96608f1ff4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=296382
10 changed files with 94 additions and 94 deletions


@ -82,8 +82,8 @@ static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
pr_err("trying to set local_comm_id in SIDR_REP\n");
return;
} else {
struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
msg->local_comm_id = cpu_to_be32(cm_id);
}
}
@ -97,8 +97,8 @@ static u32 get_local_comm_id(struct ib_mad *mad)
pr_err("trying to set local_comm_id in SIDR_REP\n");
return -1;
} else {
struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
return be32_to_cpu(msg->local_comm_id);
}
}
@ -112,8 +112,8 @@ static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
pr_err("trying to set remote_comm_id in SIDR_REQ\n");
return;
} else {
struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
msg->remote_comm_id = cpu_to_be32(cm_id);
}
}
@ -127,8 +127,8 @@ static u32 get_remote_comm_id(struct ib_mad *mad)
pr_err("trying to set remote_comm_id in SIDR_REQ\n");
return -1;
} else {
struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
return be32_to_cpu(msg->remote_comm_id);
}
}


@ -873,10 +873,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
+ !cqe->timestamp_0_15) << 16)
| be16_to_cpu(cqe->timestamp_0_15);
wc->wc_flags |= IB_WC_WITH_TIMESTAMP;
}
} else {
wc->wc_flags |= IB_WC_WITH_SLID;
wc->slid = be16_to_cpu(cqe->rlid);
}
g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
wc->src_qp = g_mlpath_rqpn & 0xffffff;
@ -886,12 +886,12 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
if (!timestamp_en) {
if (rdma_port_get_link_layer(wc->qp->device,
(*cur_qp)->port) ==
IB_LINK_LAYER_ETHERNET)
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
else
wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
wc->wc_flags |= IB_WC_WITH_SL;
}
if ((be32_to_cpu(cqe->vlan_my_qpn) &


@ -1813,8 +1813,8 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
ctx->port, 0xFFFF, &attr.pkey_index);
if (ret || !create_tun)
attr.pkey_index =
to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
attr.qkey = IB_QP1_QKEY;
attr.port_num = ctx->port;
ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);


@ -1119,7 +1119,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
pr_err("Invalid priority value.\n");
return -EINVAL;
}
if (domain >= IB_FLOW_DOMAIN_NUM) {
pr_err("Invalid domain value.\n");
return -EINVAL;
@ -1198,7 +1198,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
switch (flow_attr->type) {
case IB_FLOW_ATTR_NORMAL:
type[0] = MLX4_FS_REGULAR;
break;
case IB_FLOW_ATTR_ALL_DEFAULT:
type[0] = MLX4_FS_ALL_DEFAULT;
@ -1221,7 +1221,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
while (i < ARRAY_SIZE(type) && type[i]) {
err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
&mflow->reg_id[i]);
if (err)
goto err_free;
i++;
}
@ -1605,11 +1605,11 @@ static void update_gids_task(struct work_struct *work)
IB_LINK_LAYER_ETHERNET) {
err = mlx4_cmd(dev, mailbox->dma,
MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
if (err)
pr_warn("set port command failed\n");
else
mlx4_ib_dispatch_event(gw->dev, gw->port,
IB_EVENT_GID_CHANGE);
@ -1686,8 +1686,8 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
if (found >= 0) {
need_update = 1;
dev->iboe.gid_table[port - 1][found] = zgid;
break;
}
} else {
if (found >= 0)
break;
@ -1696,22 +1696,22 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
!memcmp(&dev->iboe.gid_table[port - 1][i],
&zgid, sizeof(*gid)))
free = i;
}
}
if (found == -1 && !clear && free < 0) {
pr_err("GID table of port %d is full. Can't add "GID_PRINT_FMT"\n",
port, GID_PRINT_ARGS(gid));
return -ENOMEM;
}
if (found == -1 && clear) {
pr_err(GID_PRINT_FMT" is not in GID table of port %d\n", GID_PRINT_ARGS(gid), port);
return -EINVAL;
}
if (found == -1 && !clear && free >= 0) {
dev->iboe.gid_table[port - 1][free] = *gid;
need_update = 1;
}
if (!need_update)
return 0;
@ -1721,10 +1721,10 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
return -ENOMEM;
memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
INIT_WORK(&work->work, update_gids_task);
work->port = port;
work->dev = dev;
queue_work(wq, &work->work);
return 0;
}
@ -1773,7 +1773,7 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev, struct mlx4_ib_dev *ibdev
for (port = 1; port <= MLX4_MAX_PORTS; ++port)
if ((netif_is_bond_master(real_dev) && (real_dev == iboe->masters[port - 1])) ||
(!netif_is_bond_master(real_dev) && (real_dev == iboe->netdevs[port - 1])))
break;
return port > MLX4_MAX_PORTS ? 0 : port;
}
@ -1809,11 +1809,11 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, struct mlx4_ib_dev *ibd
read_lock_bh(&in6_dev->lock);
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
pgid = (union ib_gid *)&ifp->addr;
update_gid_table(ibdev, port, pgid, 0, 0);
}
read_unlock_bh(&in6_dev->lock);
in6_dev_put(in6_dev);
}
#endif
}
@ -2002,10 +2002,10 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
for (j = 0; j < eq_per_port; j++) {
sprintf(name, "mlx4-ib-%d-%d@%d:%d:%d:%d", i, j,
pci_get_domain(dev->pdev->dev.bsddev),
pci_get_bus(dev->pdev->dev.bsddev),
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
/* Set IRQ for specific name (per ring) */
if (mlx4_assign_eq(dev, name,
@ -2168,7 +2168,7 @@ static struct attribute_group diag_counters_group = {
static void init_dev_assign(void)
{
int i = 1;
spin_lock_init(&dev_num_str_lock);
if (mlx4_fill_dbdf2val_tbl(&dev_assign_str))
return;
@ -2268,7 +2268,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (dev_idx >= 0)
sprintf(ibdev->ib_dev.name, "mlx4_%d", dev_idx);
else
strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
ibdev->ib_dev.owner = THIS_MODULE;
ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
@ -2471,8 +2471,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
if (!iboe->nb.notifier_call) {
iboe->nb.notifier_call = mlx4_ib_netdev_event;
err = register_netdevice_notifier(&iboe->nb);
if (err) {
iboe->nb.notifier_call = NULL;
goto err_notify;
@ -2519,8 +2519,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
if (ibdev->iboe.nb.notifier_call) {
if (unregister_netdevice_notifier(&ibdev->iboe.nb))
pr_warn("failure unregistering notifier\n");
ibdev->iboe.nb.notifier_call = NULL;
}
if (ibdev->iboe.nb_inet.notifier_call) {
@ -2873,12 +2873,12 @@ module_exit(mlx4_ib_cleanup);
static int
mlx4ib_evhand(module_t mod, int event, void *arg)
{
return (0);
}
static moduledata_t mlx4ib_mod = {
.name = "mlx4ib",
.evhand = mlx4ib_evhand,
};
DECLARE_MODULE(mlx4ib, mlx4ib_mod, SI_SUB_SMP, SI_ORDER_ANY);


@ -135,8 +135,8 @@ struct mcast_req {
#define safe_atomic_dec(ref) \
do {\
if (atomic_dec_and_test(ref)) \
mcg_warn_group(group, "did not expect to reach zero\n"); \
} while (0)
@ -570,7 +570,7 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
group->state = MCAST_IDLE;
atomic_inc(&group->refcount);
if (!queue_work(group->demux->mcg_wq, &group->work))
safe_atomic_dec(&group->refcount);
mutex_unlock(&group->lock);
}
@ -877,7 +877,7 @@ static void queue_req(struct mcast_req *req)
list_add_tail(&req->func_list, &group->func[req->func].pending);
/* calls mlx4_ib_mcg_work_handler */
if (!queue_work(group->demux->mcg_wq, &group->work))
safe_atomic_dec(&group->refcount);
}
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
@ -913,7 +913,7 @@ int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
/* calls mlx4_ib_mcg_work_handler */
atomic_inc(&group->refcount);
if (!queue_work(ctx->mcg_wq, &group->work))
safe_atomic_dec(&group->refcount);
mutex_unlock(&group->lock);
release_group(group, 0);
return 1; /* consumed */


@ -507,7 +507,7 @@ struct mlx4_ib_iboe {
spinlock_t lock;
struct net_device *netdevs[MLX4_MAX_PORTS];
struct net_device *masters[MLX4_MAX_PORTS];
struct notifier_block nb;
struct notifier_block nb_inet;
union ib_gid gid_table[MLX4_MAX_PORTS][128];
};


@ -240,7 +240,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
cur_start_addr =
sg_dma_address(sg);
len = sg_dma_len(sg);
}
/* Handle the last block */
if (len > 0) {
@ -365,40 +365,40 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
next_block_start =
sg_dma_address(sg);
current_block_end = current_block_start
+ current_block_len;
/* If we have a split (non-contig.) between two block*/
if (current_block_end != next_block_start) {
block_shift = mlx4_ib_umem_calc_block_mtt(
next_block_start,
current_block_end,
block_shift);
/* If we reached the minimum shift for 4k
page we stop the loop.
*/
if (block_shift <= min_shift)
goto end;
/* If not saved yet we are in first block -
we save the length of first block to
calculate the non_aligned_pages number at
* the end.
*/
total_len += current_block_len;
/* Start a new block */
current_block_start = next_block_start;
current_block_len =
sg_dma_len(sg);
continue;
}
/* The scatter entry is another part of
the current block, increase the block size
* An entry in the scatter can be larger than
4k (page) as of dma mapping
which merge some blocks together.
*/
current_block_len +=
sg_dma_len(sg);
}
@ -641,7 +641,7 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
if (!umem)
goto end;
ib_umem_release(mr->umem);
end:
kfree(mr);


@ -1691,7 +1691,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
* If one was already assigned, but the new mac differs,
* unregister the old one and register the new one.
*/
u64_mac = mlx4_mac_to_u64(smac);
if (!smac_info->smac || smac_info->smac != u64_mac) {
/* register candidate now, unreg if needed, after success */
@ -2746,7 +2746,7 @@ static __be32 convert_access(int acc)
cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
(acc & IB_ACCESS_REMOTE_READ ?
cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) |
(acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}


@ -386,7 +386,6 @@ ipoib_poll(struct ipoib_dev_priv *priv)
spin_lock(&priv->drain_lock);
for (;;) {
n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
for (i = 0; i < n; i++) {
struct ib_wc *wc = priv->ibwc + i;


@ -89,6 +89,7 @@ int ipoib_init_qp(struct ipoib_dev_priv *priv)
IB_QP_PORT |
IB_QP_PKEY_INDEX |
IB_QP_STATE;
ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to init, ret = %d\n", ret);