net/ena/base: adjust to latest ena-com
1. As memzone names must be unique, change the alloc coherent macros to use a
   64-bit atomic allocation counter, which enlarges the memzone name space.
2. Rename the "handle" parameter to "mem_handle" to be consistent with the
   other macros.
3. Move the ENA_PRIu64 definition up, next to the other platform definitions.
4. Align continuation backslashes to column 80.

Signed-off-by: Amit Bernstein <amitbern@amazon.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Shay Agroskin <shayagr@amazon.com>
Reviewed-by: Michal Krawczyk <mk@semihalf.com>
parent eb357d5f86
commit 7c0a233eef
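To illustrate item 1 before the diff itself: below is a minimal, hypothetical sketch (not the driver code) of the naming scheme the patch moves to. A 64-bit atomic counter gives every allocation a distinct memzone name, so the name space is no longer bounded by a 32-bit counter. The DPDK calls (rte_atomic64_add_return, rte_memzone_reserve_aligned, RTE_MEMZONE_NAMESIZE, SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG) are the same ones that appear in the diff; the function and variable names here are made up for the example.

#include <inttypes.h>
#include <stdio.h>
#include <rte_atomic.h>
#include <rte_memzone.h>

/* Illustrative counter; the driver's equivalent is ena_alloc_cnt. */
static rte_atomic64_t alloc_cnt;

static const struct rte_memzone *
alloc_coherent_example(size_t size, unsigned int align)
{
	char z_name[RTE_MEMZONE_NAMESIZE];

	/* Each allocation gets a distinct suffix, so memzone names never collide. */
	snprintf(z_name, sizeof(z_name), "ena_alloc_%" PRIi64,
		 rte_atomic64_add_return(&alloc_cnt, 1));

	/* Physically contiguous zone with the caller-specified alignment. */
	return rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
					   RTE_MEMZONE_IOVA_CONTIG, align);
}

The same counter-and-snprintf pattern appears in both ENA_MEM_ALLOC_COHERENT_ALIGNED and ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED in the diff below; only the socket id argument differs.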
drivers/net/ena/base/ena_plat_dpdk.h
@@ -34,10 +34,12 @@ typedef uint8_t u8;

typedef struct rte_eth_dev ena_netdev;
typedef uint64_t dma_addr_t;

#ifndef ETIME
#define ETIME ETIMEDOUT
#endif

+#define ENA_PRIu64 PRIu64
+
#define ena_atomic32_t rte_atomic32_t
#define ena_mem_handle_t const struct rte_memzone *
@@ -76,7 +78,7 @@ typedef uint64_t dma_addr_t;
#define __iomem

#define US_PER_S 1000000
#define ENA_GET_SYSTEM_USECS() \
	(rte_get_timer_cycles() * US_PER_S / rte_get_timer_hz())

extern int ena_logtype_com;
@@ -95,20 +97,20 @@ extern int ena_logtype_com;
#define BIT(nr) (1UL << (nr))
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
	(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

#ifdef RTE_LIBRTE_ENA_COM_DEBUG
#define ena_trc_log(dev, level, fmt, arg...) \
	( \
		ENA_TOUCH(dev), \
		rte_log(RTE_LOG_ ## level, ena_logtype_com, \
			"[ENA_COM: %s]" fmt, __func__, ##arg) \
	)

#define ena_trc_dbg(dev, format, arg...) ena_trc_log(dev, DEBUG, format, ##arg)
#define ena_trc_info(dev, format, arg...) ena_trc_log(dev, INFO, format, ##arg)
#define ena_trc_warn(dev, format, arg...) \
	ena_trc_log(dev, WARNING, format, ##arg)
#define ena_trc_err(dev, format, arg...) ena_trc_log(dev, ERR, format, ##arg)
#else
@@ -118,51 +120,51 @@ extern int ena_logtype_com;
#define ena_trc_err(dev, format, arg...) ENA_TOUCH(dev)
#endif /* RTE_LIBRTE_ENA_COM_DEBUG */

#define ENA_WARN(cond, dev, format, arg...) \
	do { \
		if (unlikely(cond)) { \
			ena_trc_err(dev, \
				"Warn failed on %s:%s:%d:" format, \
				__FILE__, __func__, __LINE__, ##arg); \
		} \
	} while (0)

/* Spinlock related methods */
#define ena_spinlock_t rte_spinlock_t
-#define ENA_SPINLOCK_INIT(spinlock) rte_spinlock_init(&spinlock)
-#define ENA_SPINLOCK_LOCK(spinlock, flags) \
-	({(void)flags; rte_spinlock_lock(&spinlock); })
-#define ENA_SPINLOCK_UNLOCK(spinlock, flags) \
-	({(void)flags; rte_spinlock_unlock(&spinlock); })
-#define ENA_SPINLOCK_DESTROY(spinlock) ((void)spinlock)
+#define ENA_SPINLOCK_INIT(spinlock) rte_spinlock_init(&(spinlock))
+#define ENA_SPINLOCK_LOCK(spinlock, flags) \
+	({(void)flags; rte_spinlock_lock(&(spinlock)); })
+#define ENA_SPINLOCK_UNLOCK(spinlock, flags) \
+	({(void)flags; rte_spinlock_unlock(&(spinlock)); })
+#define ENA_SPINLOCK_DESTROY(spinlock) ((void)(spinlock))

#define q_waitqueue_t \
	struct { \
		pthread_cond_t cond; \
		pthread_mutex_t mutex; \
	}

#define ena_wait_queue_t q_waitqueue_t

#define ENA_WAIT_EVENT_INIT(waitqueue) \
	do { \
		pthread_mutex_init(&(waitqueue).mutex, NULL); \
		pthread_cond_init(&(waitqueue).cond, NULL); \
	} while (0)

-#define ENA_WAIT_EVENT_WAIT(waitevent, timeout) \
-	do { \
-		struct timespec wait; \
-		struct timeval now; \
-		unsigned long timeout_us; \
-		gettimeofday(&now, NULL); \
-		wait.tv_sec = now.tv_sec + timeout / 1000000UL; \
-		timeout_us = timeout % 1000000UL; \
-		wait.tv_nsec = (now.tv_usec + timeout_us) * 1000UL; \
-		pthread_mutex_lock(&waitevent.mutex); \
-		pthread_cond_timedwait(&waitevent.cond, \
-				&waitevent.mutex, &wait); \
-		pthread_mutex_unlock(&waitevent.mutex); \
+#define ENA_WAIT_EVENT_WAIT(waitevent, timeout) \
+	do { \
+		struct timespec wait; \
+		struct timeval now; \
+		unsigned long timeout_us; \
+		gettimeofday(&now, NULL); \
+		wait.tv_sec = now.tv_sec + (timeout) / 1000000UL; \
+		timeout_us = (timeout) % 1000000UL; \
+		wait.tv_nsec = (now.tv_usec + timeout_us) * 1000UL; \
+		pthread_mutex_lock(&(waitevent).mutex); \
+		pthread_cond_timedwait(&(waitevent).cond, \
+				&(waitevent).mutex, &wait); \
+		pthread_mutex_unlock(&(waitevent).mutex); \
	} while (0)
#define ENA_WAIT_EVENT_SIGNAL(waitevent) pthread_cond_signal(&waitevent.cond)
/* pthread condition doesn't need to be rearmed after usage */
@@ -174,104 +176,90 @@ extern int ena_logtype_com;

#define ena_time_t uint64_t
#define ENA_TIME_EXPIRE(timeout) (timeout < rte_get_timer_cycles())
-#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \
-	(timeout_us * rte_get_timer_hz() / 1000000 + rte_get_timer_cycles())
+#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \
+	((timeout_us) * rte_get_timer_hz() / 1000000 + rte_get_timer_cycles())

/*
 * Each rte_memzone should have unique name.
 * To satisfy it, count number of allocations and add it to name.
 */
-extern rte_atomic32_t ena_alloc_cnt;
+extern rte_atomic64_t ena_alloc_cnt;

-#define ENA_MEM_ALLOC_COHERENT_ALIGNED( \
-	dmadev, size, virt, phys, handle, alignment) \
-	do { \
-		const struct rte_memzone *mz = NULL; \
-		ENA_TOUCH(dmadev); ENA_TOUCH(handle); \
-		if (size > 0) { \
-			char z_name[RTE_MEMZONE_NAMESIZE]; \
-			snprintf(z_name, sizeof(z_name), \
-				"ena_alloc_%d", \
-				rte_atomic32_add_return(&ena_alloc_cnt, 1)); \
-			mz = rte_memzone_reserve_aligned(z_name, size, \
-				SOCKET_ID_ANY, \
-				RTE_MEMZONE_IOVA_CONTIG, \
-				alignment); \
-			handle = mz; \
-		} \
-		if (mz == NULL) { \
-			virt = NULL; \
-			phys = 0; \
-		} else { \
-			memset(mz->addr, 0, size); \
-			virt = mz->addr; \
-			phys = mz->iova; \
-		} \
+#define ENA_MEM_ALLOC_COHERENT_ALIGNED( \
+	dmadev, size, virt, phys, mem_handle, alignment) \
+	do { \
+		const struct rte_memzone *mz = NULL; \
+		ENA_TOUCH(dmadev); \
+		if ((size) > 0) { \
+			char z_name[RTE_MEMZONE_NAMESIZE]; \
+			snprintf(z_name, sizeof(z_name), \
+				"ena_alloc_%" PRIi64 "", \
+				rte_atomic64_add_return(&ena_alloc_cnt, 1)); \
+			mz = rte_memzone_reserve_aligned(z_name, (size), \
+				SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,\
+				alignment); \
+			mem_handle = mz; \
+		} \
+		if (mz == NULL) { \
+			virt = NULL; \
+			phys = 0; \
+		} else { \
+			memset(mz->addr, 0, (size)); \
+			virt = mz->addr; \
+			phys = mz->iova; \
+		} \
	} while (0)
-#define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, handle) \
-	ENA_MEM_ALLOC_COHERENT_ALIGNED( \
-		dmadev, \
-		size, \
-		virt, \
-		phys, \
-		handle, \
-		RTE_CACHE_LINE_SIZE)
-#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \
-	({ ENA_TOUCH(size); ENA_TOUCH(phys); \
-	   ENA_TOUCH(dmadev); \
-	   rte_memzone_free(handle); })
+#define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, mem_handle) \
+	ENA_MEM_ALLOC_COHERENT_ALIGNED(dmadev, size, virt, phys, \
+		mem_handle, RTE_CACHE_LINE_SIZE)
+#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, mem_handle) \
+	({ ENA_TOUCH(size); ENA_TOUCH(phys); ENA_TOUCH(dmadev); \
+		rte_memzone_free(mem_handle); })

-#define ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED( \
-	dmadev, size, virt, phys, mem_handle, node, dev_node, alignment) \
-	do { \
-		const struct rte_memzone *mz = NULL; \
-		ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
-		if (size > 0) { \
-			char z_name[RTE_MEMZONE_NAMESIZE]; \
-			snprintf(z_name, sizeof(z_name), \
-				"ena_alloc_%d", \
-				rte_atomic32_add_return(&ena_alloc_cnt, 1)); \
-			mz = rte_memzone_reserve_aligned(z_name, size, node, \
-				RTE_MEMZONE_IOVA_CONTIG, alignment); \
-			mem_handle = mz; \
-		} \
-		if (mz == NULL) { \
-			virt = NULL; \
-			phys = 0; \
-		} else { \
-			memset(mz->addr, 0, size); \
-			virt = mz->addr; \
-			phys = mz->iova; \
-		} \
+#define ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED( \
+	dmadev, size, virt, phys, mem_handle, node, dev_node, alignment) \
+	do { \
+		const struct rte_memzone *mz = NULL; \
+		ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
+		if ((size) > 0) { \
+			char z_name[RTE_MEMZONE_NAMESIZE]; \
+			snprintf(z_name, sizeof(z_name), \
+				"ena_alloc_%" PRIi64 "", \
+				rte_atomic64_add_return(&ena_alloc_cnt, 1)); \
+			mz = rte_memzone_reserve_aligned(z_name, (size), \
+				node, RTE_MEMZONE_IOVA_CONTIG, alignment); \
+			mem_handle = mz; \
+		} \
+		if (mz == NULL) { \
+			virt = NULL; \
+			phys = 0; \
+		} else { \
+			memset(mz->addr, 0, (size)); \
+			virt = mz->addr; \
+			phys = mz->iova; \
+		} \
	} while (0)
-#define ENA_MEM_ALLOC_COHERENT_NODE( \
-	dmadev, size, virt, phys, mem_handle, node, dev_node) \
-	ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED( \
-		dmadev, \
-		size, \
-		virt, \
-		phys, \
-		mem_handle, \
-		node, \
-		dev_node, \
-		RTE_CACHE_LINE_SIZE)
-#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \
-	do { \
-		ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
-		virt = rte_zmalloc_socket(NULL, size, 0, node); \
+#define ENA_MEM_ALLOC_COHERENT_NODE( \
+	dmadev, size, virt, phys, mem_handle, node, dev_node) \
+	ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(dmadev, size, virt, phys, \
+		mem_handle, node, dev_node, RTE_CACHE_LINE_SIZE)
+#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \
+	do { \
+		ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
+		virt = rte_zmalloc_socket(NULL, size, 0, node); \
	} while (0)

#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)
#define ENA_MEM_FREE(dmadev, ptr, size) \
	({ ENA_TOUCH(dmadev); ENA_TOUCH(size); rte_free(ptr); })

#define ENA_DB_SYNC(mem_handle) ((void)mem_handle)

#define ENA_REG_WRITE32(bus, value, reg) \
	({ (void)(bus); rte_write32((value), (reg)); })
#define ENA_REG_WRITE32_RELAXED(bus, value, reg) \
	({ (void)(bus); rte_write32_relaxed((value), (reg)); })
#define ENA_REG_READ32(bus, reg) \
	({ (void)(bus); rte_read32_relaxed((reg)); })

#define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr)
@@ -297,8 +285,8 @@ extern rte_atomic32_t ena_alloc_cnt;
#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))

#define ENA_TIME_EXPIRE(timeout) (timeout < rte_get_timer_cycles())
-#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \
-	(timeout_us * rte_get_timer_hz() / 1000000 + rte_get_timer_cycles())
+#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \
+	((timeout_us) * rte_get_timer_hz() / 1000000 + rte_get_timer_cycles())
#define ENA_WAIT_EVENTS_DESTROY(admin_queue) ((void)(admin_queue))

#ifndef READ_ONCE
@@ -310,14 +298,14 @@ extern rte_atomic32_t ena_alloc_cnt;
#define READ_ONCE32(var) READ_ONCE(var)

/* The size must be 8 byte align */
#define ENA_MEMCPY_TO_DEVICE_64(dst, src, size) \
	do { \
		int count, i; \
		uint64_t *to = (uint64_t *)(dst); \
		const uint64_t *from = (const uint64_t *)(src); \
		count = (size) / 8; \
		for (i = 0; i < count; i++, from++, to++) \
			rte_write64_relaxed(*from, to); \
	} while(0)

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
@@ -330,7 +318,5 @@ void ena_rss_key_fill(void *key, size_t size);

#define ENA_INTR_INITIAL_TX_INTERVAL_USECS_PLAT 0

-#define ENA_PRIu64 PRIu64
-
#include "ena_includes.h"
#endif /* DPDK_ENA_COM_ENA_PLAT_DPDK_H_ */
drivers/net/ena/ena_ethdev.c
@@ -85,7 +85,7 @@ struct ena_stats {
 * Each rte_memzone should have unique name.
 * To satisfy it, count number of allocation and add it to name.
 */
-rte_atomic32_t ena_alloc_cnt;
+rte_atomic64_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),