net/mlx5: optimize Tx doorbell write

The unnecessary volatile qualifier prevents the compiler from further
optimizing the code, which results in a small performance drop (~2%).
Because the accesses are already ordered by memory barriers, it is safe
to remove.

Fixes: 6bf10ab69be0 ("net/mlx5: support 32-bit systems")
Cc: stable@dpdk.org

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
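
[Editor's note, not part of the patch: an annotated sketch of the 64-bit
path of the non-relaxed helper after this change. rte_io_wmb() is at
minimum a compiler barrier, so the plain store cannot be hoisted above it
or elided; that is the ordering guarantee volatile was redundantly
providing.]

        rte_io_wmb();            /* prior WQE/doorbell-record stores are ordered first */
        *(uint64_t *)addr = val; /* plain store; cannot be moved above the barrier */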
@@ -379,17 +379,16 @@ uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
  * Address of the lock to use for that UAR access.
  */
 static __rte_always_inline void
-__mlx5_uar_write64_relaxed(uint64_t val, volatile void *addr,
+__mlx5_uar_write64_relaxed(uint64_t val, void *addr,
                            rte_spinlock_t *lock __rte_unused)
 {
 #ifdef RTE_ARCH_64
-        rte_write64_relaxed(val, addr);
+        *(uint64_t *)addr = val;
 #else /* !RTE_ARCH_64 */
         rte_spinlock_lock(lock);
-        rte_write32_relaxed(val, addr);
+        *(uint32_t *)addr = val;
         rte_io_wmb();
-        rte_write32_relaxed(val >> 32,
-                            (volatile void *)((volatile char *)addr + 4));
+        *((uint32_t *)addr + 1) = val >> 32;
         rte_spinlock_unlock(lock);
 #endif
 }
@@ -407,7 +406,7 @@ __mlx5_uar_write64_relaxed(uint64_t val, volatile void *addr,
  * Address of the lock to use for that UAR access.
  */
 static __rte_always_inline void
-__mlx5_uar_write64(uint64_t val, volatile void *addr, rte_spinlock_t *lock)
+__mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
 {
         rte_io_wmb();
         __mlx5_uar_write64_relaxed(val, addr, lock);
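
[Editor's note: a minimal, self-contained usage sketch of the updated
helper. The names below (db_reg, db_lock, ring_doorbell) are illustrative
assumptions, not part of the mlx5 driver, and the sketch assumes the mlx5
header defining __mlx5_uar_write64() is included.]

        #include <stdint.h>
        #include <rte_spinlock.h>

        /* Hypothetical state for the sketch only. */
        static void *db_reg;            /* assumed mapping of a UAR doorbell register */
        static rte_spinlock_t db_lock = RTE_SPINLOCK_INITIALIZER;
                                        /* only taken inside the helper on 32-bit builds */

        static void
        ring_doorbell(uint64_t db_val)
        {
                /*
                 * __mlx5_uar_write64() issues rte_io_wmb() before the store(s),
                 * so earlier WQE and doorbell-record writes are ordered before
                 * the device observes the doorbell; no volatile is required.
                 */
                __mlx5_uar_write64(db_val, db_reg, &db_lock);
        }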