ofed/drivers: minor spelling fixes.

No functional change.

Reviewed by:	hselasky
Pedro F. Giffuni 2016-05-06 15:16:13 +00:00
parent 2a392dd62b
commit bf5cba36db
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=299179
10 changed files with 14 additions and 14 deletions

@@ -799,7 +799,7 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
     unsigned long command = vma->vm_pgoff & MLX4_IB_MMAP_CMD_MASK;
     if (command < MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
-        /* compatability handling for commands 0 & 1*/
+        /* compatibility handling for commands 0 & 1*/
         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
             return -EINVAL;
     }

@@ -689,7 +689,7 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
         cur_join_state = group->rec.scope_join_state & 7;
         if (method == IB_MGMT_METHOD_GET_RESP) {
-            /* successfull join */
+            /* successful join */
             if (!cur_join_state && resp_join_state)
                 --rc;
         } else if (!resp_join_state)

@@ -341,8 +341,8 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
            address for the start of the MR.
         */
         /* umem_get aligned the start_va to a page
-           boundry. Therefore, we need to align the
-           start va to the same boundry */
+           boundary. Therefore, we need to align the
+           start va to the same boundary */
         /* misalignment_bits is needed to handle the
            case of a single memory region. In this
            case, the rest of the logic will not reduce
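
The comment block in this hunk reasons about aligning the MR start VA to the same page boundary umem_get used. As a minimal standalone sketch of that arithmetic (assuming 4 KiB pages; the names and values below are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                       /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    uint64_t start_va = 0x7f0012345678;              /* hypothetical MR start VA */
    uint64_t aligned  = start_va & PAGE_MASK;        /* rounded down to the page boundary */
    uint64_t offset   = start_va & (PAGE_SIZE - 1);  /* the misalignment bits within the page */

    printf("va=0x%jx aligned=0x%jx offset=0x%jx\n",
        (uintmax_t)start_va, (uintmax_t)aligned, (uintmax_t)offset);
    return 0;
}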

@@ -66,7 +66,7 @@ struct name { \
     struct type *lh_first; /* first element */ \
 }

-/* Interval between sucessive polls in the Tx routine when polling is used
+/* Interval between successive polls in the Tx routine when polling is used
    instead of interrupts (in per-core Tx rings) - should be power of 2 */
 #define SDP_TX_POLL_MODER 16
 #define SDP_TX_POLL_TIMEOUT (HZ / 20)
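
The comment fixed above also notes that SDP_TX_POLL_MODER should be a power of 2: that lets an "every Nth packet" test reduce to a bit mask instead of a division. A hypothetical sketch of that property (not the driver's actual polling loop):

#include <stdio.h>

#define SDP_TX_POLL_MODER 16    /* power of 2, as the comment requires */

int main(void)
{
    /* (seq & (N - 1)) == 0 matches seq % N == 0 only when N is a power of 2 */
    for (unsigned int seq = 0; seq < 64; seq++)
        if ((seq & (SDP_TX_POLL_MODER - 1)) == 0)
            printf("poll the Tx ring at packet %u\n", seq);
    return 0;
}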
@@ -424,8 +424,8 @@ struct sdp_sock {
     /* SDP slow start */
     int recv_request_head; /* mark the rx_head when the resize request
-                              was recieved */
-    int recv_request; /* XXX flag if request to resize was recieved */
+                              was received */
+    int recv_request; /* XXX flag if request to resize was received */
     unsigned long tx_packets;
     unsigned long rx_packets;

@@ -328,7 +328,7 @@ sdp_poll_tx(struct sdp_sock *ssk)
     SDPSTATS_COUNTER_INC(tx_poll_hit);

     inflight = (u32) tx_ring_posted(ssk);
-    sdp_prf1(ssk->socket, NULL, "finished tx proccessing. inflight = %d",
+    sdp_prf1(ssk->socket, NULL, "finished tx processing. inflight = %d",
         inflight);

     /* If there are still packets in flight and the timer has not already

@@ -552,7 +552,7 @@ mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
 /* For cpu arch with cache line of 64B the performance is better when cqe size==64B
  * To enlarge cqe size from 32B to 64B --> 32B of garbage (i.e. 0xccccccc)
  * was added in the beginning of each cqe (the real data is in the corresponding 32B).
- * The following calc ensures that when factor==1, it means we are alligned to 64B
+ * The following calc ensures that when factor==1, it means we are aligned to 64B
  * and we get the real cqe data*/
 #define CQE_FACTOR_INDEX(index, factor) ((index << factor) + factor)
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
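
The comment and macro in this hunk describe where the real 32B of CQE data sits when 64B CQEs are enabled: with factor == 1, CQE i maps to 32B unit 2*i + 1, the second half of the 64B slot. A minimal standalone sketch of that index arithmetic, reusing the macro exactly as defined above:

#include <stdio.h>

/* Same arithmetic as the driver's CQE_FACTOR_INDEX macro. */
#define CQE_FACTOR_INDEX(index, factor) ((index << factor) + factor)

int main(void)
{
    for (int i = 0; i < 4; i++)
        printf("cqe %d -> 32B unit %d (factor 0), unit %d (factor 1)\n",
            i, CQE_FACTOR_INDEX(i, 0), CQE_FACTOR_INDEX(i, 1));
    return 0;
}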

@@ -1901,7 +1901,7 @@ void mlx4_opreq_action(struct work_struct *work)
                MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
                MLX4_CMD_NATIVE);
         if (err) {
-            mlx4_err(dev, "Failed to retreive required operation: %d\n", err);
+            mlx4_err(dev, "Failed to retrieve required operation: %d\n", err);
             return;
         }
         MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);

@@ -159,7 +159,7 @@ MODULE_PARM_DESC(high_rate_steer, "Enable steering mode for higher packet rate"
 static int fast_drop;
 module_param_named(fast_drop, fast_drop, int, 0444);
 MODULE_PARM_DESC(fast_drop,
-    "Enable fast packet drop when no recieve WQEs are posted");
+    "Enable fast packet drop when no receive WQEs are posted");

 int mlx4_enable_64b_cqe_eqe = 1;
 module_param_named(enable_64b_cqe_eqe, mlx4_enable_64b_cqe_eqe, int, 0644);
@@ -2452,7 +2452,7 @@ EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
 void __mlx4_counter_free(struct mlx4_dev *dev, int slave, int port, u32 idx)
 {
-    /* check if native or slave and deletes acordingly */
+    /* check if native or slave and deletes accordingly */
     struct mlx4_priv *priv = mlx4_priv(dev);
     struct counter_index *pf, *tmp_pf;
     struct counter_index *vf, *tmp_vf;

@@ -164,7 +164,7 @@ enum mlx4_res_tracker_free_type {
 /*
  *Virtual HCR structures.
- * mlx4_vhcr is the sw representation, in machine endianess
+ * mlx4_vhcr is the sw representation, in machine endianness
  *
  * mlx4_vhcr_cmd is the formalized structure, the one that is passed
  * to FW to go through communication channel.

@@ -2916,7 +2916,7 @@ int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
     /* Call the SW implementation of write_mtt:
      * - Prepare a dummy mtt struct
-     * - Translate inbox contents to simple addresses in host endianess */
+     * - Translate inbox contents to simple addresses in host endianness */
     mtt.offset = 0; /* TBD this is broken but I don't handle it since
                        we don't really use it */
     mtt.order = 0;