remove extra parentheses in return statement

Fix the error reported by checkpatch:

    ERROR: return is not a function, parentheses are not required

Remove the parentheses around plain return expressions, e.g.
"return (logical expressions)", and around returned function calls, e.g.
"return (rte_mempool_lookup(...))".

Fixes: 6307b909b8e0 ("lib: remove extra parenthesis after return")

Signed-off-by: Huawei Xie <huawei.xie@intel.com>
commit 693f715da4
parent 6e7caa1ad9
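
To make the rule concrete, here is a minimal, hypothetical example of the
style checkpatch enforces (not code taken from this patch): "return" is a
statement, not a function call, so parentheses around its value are redundant.

    #include <stdio.h>

    /* Flagged by checkpatch:
     * "ERROR: return is not a function, parentheses are not required"
     */
    static int sum_flagged(int a, int b)
    {
            return (a + b);
    }

    /* Preferred form: same behaviour, no redundant parentheses. */
    static int sum_clean(int a, int b)
    {
            return a + b;
    }

    int main(void)
    {
            printf("%d %d\n", sum_flagged(2, 3), sum_clean(2, 3));
            return 0;
    }
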
@@ -2420,11 +2420,11 @@ parse_item_list(char* str, const char* item_name, unsigned int max_items,
}
if (c != ',') {
printf("character %c is not a decimal digit\n", c);
-return (0);
+return 0;
}
if (! value_ok) {
printf("No valid value before comma\n");
-return (0);
+return 0;
}
if (nb_item < max_items) {
parsed_items[nb_item] = value;
@@ -2436,11 +2436,11 @@ parse_item_list(char* str, const char* item_name, unsigned int max_items,
if (nb_item >= max_items) {
printf("Number of %s = %u > %u (maximum items)\n",
item_name, nb_item + 1, max_items);
-return (0);
+return 0;
}
parsed_items[nb_item++] = value;
if (! check_unique_values)
-return (nb_item);
+return nb_item;

/*
* Then, check that all values in the list are differents.
@@ -2451,11 +2451,11 @@ parse_item_list(char* str, const char* item_name, unsigned int max_items,
if (parsed_items[j] == parsed_items[i]) {
printf("duplicated %s %u at index %u and %u\n",
item_name, parsed_items[i], i, j);
-return (0);
+return 0;
}
}
}
-return (nb_item);
+return nb_item;
}

struct cmd_set_list_result {

@@ -750,7 +750,7 @@ ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
printf("%s ring memory zoneof (port %d, queue %d) not"
"found (zone name = %s\n",
ring_name, port_id, q_id, mz_name);
-return (mz);
+return mz;
}

union igb_ring_dword {

@@ -96,7 +96,7 @@ tx_mbuf_alloc(struct rte_mempool *mp)

m = __rte_mbuf_raw_alloc(mp);
__rte_mbuf_sanity_check_raw(m, 0);
-return (m);
+return m;
}

@@ -64,7 +64,7 @@ get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz)
nb = pg_num * sizeof(*pa);

if ((fd = open(PAGEMAP_FNAME, O_RDONLY)) < 0)
-return (ENOENT);
+return ENOENT;

if ((rc = pread(fd, pa, nb, ofs)) < 0 || (rc -= nb) != 0) {

@@ -79,7 +79,7 @@ get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz)
for (i = 0; i != pg_num; i++)
pa[i] = (pa[i] & PAGEMAP_PFN_MASK) * pg_sz;

-return (rc);
+return rc;
}

struct rte_mempool *
@@ -103,7 +103,7 @@ mempool_anon_create(const char *name, unsigned elt_num, unsigned elt_size,
pg_sz = getpagesize();
if (rte_is_power_of_2(pg_sz) == 0) {
rte_errno = EINVAL;
-return (mp);
+return mp;
}

pg_shift = rte_bsf32(pg_sz);
@@ -122,7 +122,7 @@ mempool_anon_create(const char *name, unsigned elt_num, unsigned elt_size,
"error code: %d\n",
__func__, name, sz, errno);
rte_errno = rc;
-return (mp);
+return mp;
}

/* extract physical mappings of the allocated memory. */
@@ -177,7 +177,7 @@ mempool_anon_create(const char *name, unsigned elt_num, unsigned elt_size,
}

free(pa);
-return (mp);
+return mp;
}

#else /* RTE_EXEC_ENV_LINUXAPP */
@@ -195,7 +195,7 @@ mempool_anon_create(__rte_unused const char *name,
__rte_unused int socket_id, __rte_unused unsigned flags)
{
rte_errno = ENOTSUP;
-return (NULL);
+return NULL;
}

#endif /* RTE_EXEC_ENV_LINUXAPP */

@@ -420,7 +420,7 @@ mbuf_pool_find(unsigned int sock_id)
char pool_name[RTE_MEMPOOL_NAMESIZE];

mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name));
-return (rte_mempool_lookup((const char *)pool_name));
+return rte_mempool_lookup((const char *)pool_name);
}

/**

@@ -93,7 +93,7 @@ tx_mbuf_alloc(struct rte_mempool *mp)

m = __rte_mbuf_raw_alloc(mp);
__rte_mbuf_sanity_check_raw(m, 0);
-return (m);
+return m;
}

static void

@@ -237,7 +237,7 @@ test_kni_allocate_lcores(void)
}
printf("count: %u\n", count);

-return (count == 2 ? 0 : -1);
+return count == 2 ? 0 : -1;
}

static int

@@ -696,7 +696,7 @@ test_refcnt_slave(__attribute__((unused)) void *arg)
printf("%s finished at lcore %u, "
"number of freed mbufs: %u\n",
__func__, lcore, free);
-return (0);
+return 0;
}

static void
@@ -770,7 +770,7 @@ test_refcnt_master(void)
rte_wmb();

printf("%s finished at lcore %u\n", __func__, lcore);
-return (0);
+return 0;
}

#endif
@@ -786,7 +786,7 @@ test_refcnt_mbuf(void)
if ((lnum = rte_lcore_count()) == 1) {
printf("skipping %s, number of lcores: %u is not enough\n",
__func__, lnum);
-return (0);
+return 0;
}

printf("starting %s, at %u lcores\n", __func__, lnum);
@@ -800,7 +800,7 @@ test_refcnt_mbuf(void)
SOCKET_ID_ANY)) == NULL) {
printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
__func__);
-return (-1);
+return -1;
}

if (refcnt_mbuf_ring == NULL &&
@@ -809,7 +809,7 @@ test_refcnt_mbuf(void)
RING_F_SP_ENQ)) == NULL) {
printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
"\n", __func__);
-return (-1);
+return -1;
}

refcnt_stop_slaves = 0;
@@ -836,7 +836,7 @@ test_refcnt_mbuf(void)
rte_ring_dump(stdout, refcnt_mbuf_ring);

#endif
-return (0);
+return 0;
}

#include <unistd.h>

@@ -152,8 +152,8 @@ free_buffers(void)
static inline size_t
get_rand_offset(size_t uoffset)
{
-return (((rte_rand() % (LARGE_BUFFER_SIZE - SMALL_BUFFER_SIZE)) &
-~(ALIGNMENT_UNIT - 1)) + uoffset);
+return ((rte_rand() % (LARGE_BUFFER_SIZE - SMALL_BUFFER_SIZE)) &
+~(ALIGNMENT_UNIT - 1)) + uoffset;
}

/* Fill in source and destination addresses. */

@@ -448,10 +448,10 @@ test_mempool_xmem_misc(void)
printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) "
"returns: %#zx, while expected: %#zx;\n",
__func__, elt_num, total_size, sz, (size_t)usz);
-return (-1);
+return -1;
}

-return (0);
+return 0;
}

static int

@ -609,36 +609,36 @@ check_memzone_bounded(const char *name, uint32_t len, uint32_t align,
|
||||
align, bound)) == NULL) {
|
||||
printf("%s(%s): memzone creation failed\n",
|
||||
__func__, name);
|
||||
return (-1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((mz->phys_addr & ((phys_addr_t)align - 1)) != 0) {
|
||||
printf("%s(%s): invalid phys addr alignment\n",
|
||||
__func__, mz->name);
|
||||
return (-1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (((uintptr_t) mz->addr & ((uintptr_t)align - 1)) != 0) {
|
||||
printf("%s(%s): invalid virtual addr alignment\n",
|
||||
__func__, mz->name);
|
||||
return (-1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((mz->len & RTE_CACHE_LINE_MASK) != 0 || mz->len < len ||
|
||||
mz->len < RTE_CACHE_LINE_SIZE) {
|
||||
printf("%s(%s): invalid length\n",
|
||||
__func__, mz->name);
|
||||
return (-1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((mz->phys_addr & bmask) !=
|
||||
((mz->phys_addr + mz->len - 1) & bmask)) {
|
||||
printf("%s(%s): invalid memzone boundary %u crossed\n",
|
||||
__func__, mz->name, bound);
|
||||
return (-1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -654,7 +654,7 @@ test_memzone_bounded(void)
|
||||
100, SOCKET_ID_ANY, 0, 32, UINT32_MAX)) != NULL) {
|
||||
printf("%s(%s)created a memzone with invalid boundary "
|
||||
"conditions\n", __func__, memzone_err->name);
|
||||
return (-1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* should fail as len is greater then boundary */
|
||||
@ -663,20 +663,20 @@ test_memzone_bounded(void)
|
||||
100, SOCKET_ID_ANY, 0, 32, 32)) != NULL) {
|
||||
printf("%s(%s)created a memzone with invalid boundary "
|
||||
"conditions\n", __func__, memzone_err->name);
|
||||
return (-1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((rc = check_memzone_bounded("bounded_128", 100, 128, 128)) != 0)
|
||||
return (rc);
|
||||
return rc;
|
||||
|
||||
if ((rc = check_memzone_bounded("bounded_256", 100, 256, 128)) != 0)
|
||||
return (rc);
|
||||
return rc;
|
||||
|
||||
if ((rc = check_memzone_bounded("bounded_1K", 100, 64, 1024)) != 0)
|
||||
return (rc);
|
||||
return rc;
|
||||
|
||||
if ((rc = check_memzone_bounded("bounded_1K_MAX", 0, 64, 1024)) != 0)
|
||||
return (rc);
|
||||
return rc;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -229,7 +229,7 @@ static double calc_drop_prob(uint32_t min_th, uint32_t max_th,
|
||||
} else {
|
||||
drop_prob = 1.0;
|
||||
}
|
||||
return (drop_prob);
|
||||
return drop_prob;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -249,7 +249,7 @@ static int check_drop_rate(double *diff, double drop_rate, double drop_prob, dou
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
return (ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -269,7 +269,7 @@ static int check_avg(double *diff, double avg, double exp_avg, double tolerance)
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
return (ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -303,10 +303,10 @@ static uint64_t get_machclk_freq(void)
|
||||
USEC_PER_MSEC); /**< diff is in micro secs */
|
||||
|
||||
if (diff == 0)
|
||||
return(0);
|
||||
return 0;
|
||||
|
||||
clk_freq_hz = ((end - start) * USEC_PER_SEC / diff);
|
||||
return (clk_freq_hz);
|
||||
return clk_freq_hz;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -326,14 +326,14 @@ test_rte_red_init(struct test_config *tcfg)
|
||||
(uint16_t)tcfg->tconfig->min_th,
|
||||
(uint16_t)tcfg->tconfig->max_th,
|
||||
(uint16_t)tcfg->tconfig->maxp_inv[i]) != 0) {
|
||||
return(FAIL);
|
||||
return FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
*tcfg->tqueue->q = 0;
|
||||
*tcfg->tvar->dropped = 0;
|
||||
*tcfg->tvar->enqueued = 0;
|
||||
return(PASS);
|
||||
return PASS;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -364,11 +364,11 @@ increase_actual_qsize(struct rte_red_config *red_cfg,
|
||||
* check if target actual queue size has been reached
|
||||
*/
|
||||
if (*q != level)
|
||||
return (-1);
|
||||
return -1;
|
||||
/**
|
||||
* success
|
||||
*/
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -395,11 +395,11 @@ increase_average_qsize(struct rte_red_config *red_cfg,
|
||||
*/
|
||||
avg = rte_red_get_avg_int(red_cfg, red);
|
||||
if (avg != level)
|
||||
return (-1);
|
||||
return -1;
|
||||
/**
|
||||
* success
|
||||
*/
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -572,7 +572,7 @@ static enum test_result func_test1(struct test_config *tcfg)
|
||||
(double)tcfg->tqueue->drop_tolerance);
|
||||
}
|
||||
out:
|
||||
return (result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -683,7 +683,7 @@ static enum test_result func_test2(struct test_config *tcfg)
|
||||
(double)tcfg->tqueue->drop_tolerance);
|
||||
}
|
||||
out:
|
||||
return (result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -796,7 +796,7 @@ static enum test_result func_test3(struct test_config *tcfg)
|
||||
diff <= (double)tcfg->tqueue->avg_tolerance ? "pass" : "fail");
|
||||
}
|
||||
out:
|
||||
return (result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -938,7 +938,7 @@ static enum test_result func_test4(struct test_config *tcfg)
|
||||
diff, (double)tcfg->tqueue->avg_tolerance,
|
||||
diff <= (double)tcfg->tqueue->avg_tolerance ? "pass" : "fail");
|
||||
out:
|
||||
return (result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1078,7 +1078,7 @@ static enum test_result func_test5(struct test_config *tcfg)
|
||||
diff, (double)tcfg->tqueue->drop_tolerance);
|
||||
}
|
||||
out:
|
||||
return (result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1209,7 +1209,7 @@ static enum test_result func_test6(struct test_config *tcfg)
|
||||
diff <= tcfg->tqueue->avg_tolerance ? "pass" : "fail");
|
||||
}
|
||||
out:
|
||||
return (result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1380,7 +1380,7 @@ static enum test_result perf1_test(struct test_config *tcfg)
|
||||
|
||||
rdtsc_prof_print(&prof);
|
||||
out:
|
||||
return (result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1567,7 +1567,7 @@ static enum test_result perf2_test(struct test_config *tcfg)
|
||||
|
||||
rdtsc_prof_print(&prof);
|
||||
out:
|
||||
return (result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1731,7 +1731,7 @@ static enum test_result ovfl_test1(struct test_config *tcfg)
|
||||
*tcfg->tvar->enqueued, *tcfg->tvar->dropped,
|
||||
drop_prob * 100.0, drop_rate * 100.0);
|
||||
out:
|
||||
return (result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1870,7 +1870,7 @@ test_red(void)
|
||||
printf("[total: %u, pass: %u, fail: %u]\n", num_tests, num_pass, num_tests - num_pass);
|
||||
ret = -1;
|
||||
}
|
||||
return (ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct test_command red_cmd = {
|
||||
|
@ -112,7 +112,7 @@ static struct rte_ring *r;
|
||||
printf("error at %s:%d\tcondition " #exp " failed\n", \
|
||||
__func__, __LINE__); \
|
||||
rte_ring_dump(stdout, r); \
|
||||
return (-1); \
|
||||
return -1; \
|
||||
}
|
||||
|
||||
#define TEST_RING_FULL_EMTPY_ITER 8
|
||||
@ -274,7 +274,7 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
|
||||
TEST_RING_VERIFY(0 == memcmp(src, dst, rsz));
|
||||
rte_ring_dump(stdout, r);
|
||||
}
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -222,7 +222,7 @@ Then, the number of ports in the destination portmask is calculated with the hel
|
||||
|
||||
for (n = 0; v != 0; v &= v - 1, n++)
|
||||
;
|
||||
return (n);
|
||||
return n;
|
||||
}
|
||||
|
||||
This is done to determine which forwarding algorithm to use.
|
||||
@ -344,13 +344,13 @@ It is the mcast_out_pkt() function that performs the packet duplication (either
|
||||
/* Create new mbuf for the header. */
|
||||
|
||||
if (unlikely ((hdr = rte_pktmbuf_alloc(header_pool)) == NULL))
|
||||
return (NULL);
|
||||
return NULL;
|
||||
|
||||
/* If requested, then make a new clone packet. */
|
||||
|
||||
if (use_clone != 0 && unlikely ((pkt = rte_pktmbuf_clone(pkt, clone_pool)) == NULL)) {
|
||||
rte_pktmbuf_free(hdr);
|
||||
return (NULL);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* prepend new header */
|
||||
@ -370,5 +370,5 @@ It is the mcast_out_pkt() function that performs the packet duplication (either
|
||||
hdr->ol_flags = pkt->ol_flags;
|
||||
rte_mbuf_sanity_check(hdr, RTE_MBUF_PKT, 1);
|
||||
|
||||
return (hdr);
|
||||
return hdr;
|
||||
}
|
||||
|
@ -179,7 +179,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
|
||||
qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
|
||||
RTE_CACHE_LINE_SIZE, socket_id);
|
||||
if (qp == NULL)
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
qp->id = qp_id;
|
||||
dev->data->queue_pairs[qp_id] = qp;
|
||||
|
@ -294,8 +294,8 @@ uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
|
||||
tail = queue->tail;
|
||||
|
||||
/* Find how many can actually fit on the ring */
|
||||
overflow = (rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
|
||||
- queue->max_inflights);
|
||||
overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
|
||||
- queue->max_inflights;
|
||||
if (overflow > 0) {
|
||||
rte_atomic16_sub(&tmp_qp->inflights16, overflow);
|
||||
nb_pkts_possible = nb_pkts - overflow;
|
||||
|
@ -150,13 +150,13 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
|
||||
(qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
|
||||
PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
|
||||
qp_conf->nb_descriptors);
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (dev->pci_dev->mem_resource[0].addr == NULL) {
|
||||
PMD_DRV_LOG(ERR, "Could not find VF config space "
|
||||
"(UIO driver attached?).");
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (queue_pair_id >=
|
||||
@ -164,7 +164,7 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
|
||||
ADF_NUM_BUNDLES_PER_DEV)) {
|
||||
PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
|
||||
queue_pair_id);
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Allocate the queue pair data structure. */
|
||||
@ -172,7 +172,7 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
|
||||
sizeof(*qp), RTE_CACHE_LINE_SIZE);
|
||||
if (qp == NULL) {
|
||||
PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
qp->mmap_bar_addr = dev->pci_dev->mem_resource[0].addr;
|
||||
rte_atomic16_init(&qp->inflights16);
|
||||
@ -198,7 +198,7 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
|
||||
|
||||
create_err:
|
||||
rte_free(qp);
|
||||
return (-EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
|
||||
@ -293,7 +293,7 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
|
||||
PMD_INIT_FUNC_TRACE();
|
||||
if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
|
||||
PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -306,7 +306,7 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
|
||||
socket_id);
|
||||
if (qp_mz == NULL) {
|
||||
PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
queue->base_addr = (char *)qp_mz->addr;
|
||||
@ -322,7 +322,7 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
|
||||
if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
|
||||
!= 0) {
|
||||
PMD_DRV_LOG(ERR, "Invalid num inflights");
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
|
||||
@ -336,7 +336,7 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
|
||||
|
||||
if (queue->max_inflights < 2) {
|
||||
PMD_DRV_LOG(ERR, "Invalid num inflights");
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
queue->head = 0;
|
||||
queue->tail = 0;
|
||||
@ -361,7 +361,7 @@ static int qat_qp_check_queue_alignment(uint64_t phys_addr,
|
||||
{
|
||||
PMD_INIT_FUNC_TRACE();
|
||||
if (((queue_size_bytes - 1) & phys_addr) != 0)
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -378,7 +378,7 @@ static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
|
||||
return 0;
|
||||
}
|
||||
PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
|
||||
|
@ -292,13 +292,13 @@ void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx)
|
||||
|
||||
uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type)
|
||||
{
|
||||
return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
|
||||
DMAE_COMMAND_C_TYPE_ENABLE));
|
||||
return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
|
||||
DMAE_COMMAND_C_TYPE_ENABLE);
|
||||
}
|
||||
|
||||
uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode)
|
||||
{
|
||||
return (opcode & ~DMAE_COMMAND_SRC_RESET);
|
||||
return opcode & ~DMAE_COMMAND_SRC_RESET;
|
||||
}
|
||||
|
||||
uint32_t
|
||||
@ -1098,7 +1098,7 @@ static int bnx2x_tx_queue_has_work(const struct bnx2x_fastpath *fp)
|
||||
|
||||
mb(); /* status block fields can change */
|
||||
hw_cons = le16toh(*fp->tx_cons_sb);
|
||||
return (hw_cons != txq->tx_pkt_head);
|
||||
return hw_cons != txq->tx_pkt_head;
|
||||
}
|
||||
|
||||
static uint8_t bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
|
||||
@ -1122,7 +1122,7 @@ static int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
|
||||
if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) ==
|
||||
MAX_RCQ_ENTRIES(rxq)))
|
||||
rx_cq_cons_sb++;
|
||||
return (rxq->rx_cq_head != rx_cq_cons_sb);
|
||||
return rxq->rx_cq_head != rx_cq_cons_sb;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1280,7 +1280,7 @@ next_cqe:
|
||||
/* Update producers */
|
||||
bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod);
|
||||
|
||||
return (sw_cq_cons != hw_cq_cons);
|
||||
return sw_cq_cons != hw_cq_cons;
|
||||
}
|
||||
|
||||
static uint16_t
|
||||
@ -2559,7 +2559,7 @@ static void bnx2x_clear_reset_global(struct bnx2x_softc *sc)
|
||||
/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
|
||||
static uint8_t bnx2x_reset_is_global(struct bnx2x_softc *sc)
|
||||
{
|
||||
return (REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT);
|
||||
return REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT;
|
||||
}
|
||||
|
||||
/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
|
||||
@ -2618,7 +2618,7 @@ static uint8_t bnx2x_get_load_status(struct bnx2x_softc *sc, int engine)
|
||||
|
||||
val = ((val & mask) >> shift);
|
||||
|
||||
return (val != 0);
|
||||
return val != 0;
|
||||
}
|
||||
|
||||
/* set pf load mark */
|
||||
@ -4860,9 +4860,9 @@ bnx2x_init_sb(struct bnx2x_softc *sc, phys_addr_t busaddr, int vfid,
|
||||
static uint8_t bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
|
||||
{
|
||||
if (CHIP_IS_E1x(fp->sc)) {
|
||||
return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
|
||||
return fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H;
|
||||
} else {
|
||||
return (fp->cl_id);
|
||||
return fp->cl_id;
|
||||
}
|
||||
}
|
||||
|
||||
@ -4872,9 +4872,9 @@ bnx2x_rx_ustorm_prods_offset(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp)
|
||||
uint32_t offset = BAR_USTRORM_INTMEM;
|
||||
|
||||
if (IS_VF(sc)) {
|
||||
return (PXP_VF_ADDR_USDM_QUEUES_START +
|
||||
return PXP_VF_ADDR_USDM_QUEUES_START +
|
||||
(sc->acquire_resp.resc.hw_qid[fp->index] *
|
||||
sizeof(struct ustorm_queue_zone_data)));
|
||||
sizeof(struct ustorm_queue_zone_data));
|
||||
} else if (!CHIP_IS_E1x(sc)) {
|
||||
offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
|
||||
} else {
|
||||
@ -7587,8 +7587,8 @@ static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg)
|
||||
|
||||
static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc)
|
||||
{
|
||||
return (bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) &
|
||||
PCIM_EXP_STA_TRANSACTION_PND);
|
||||
return bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) &
|
||||
PCIM_EXP_STA_TRANSACTION_PND;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -9922,7 +9922,7 @@ static uint32_t bnx2x_get_pretend_reg(struct bnx2x_softc *sc)
|
||||
{
|
||||
uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
|
||||
uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
|
||||
return (base + (SC_ABS_FUNC(sc)) * stride);
|
||||
return base + (SC_ABS_FUNC(sc)) * stride;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -10777,11 +10777,11 @@ static uint32_t bnx2x_flr_clnup_poll_count(struct bnx2x_softc *sc)
|
||||
{
|
||||
/* adjust polling timeout */
|
||||
if (CHIP_REV_IS_EMUL(sc)) {
|
||||
return (FLR_POLL_CNT * 2000);
|
||||
return FLR_POLL_CNT * 2000;
|
||||
}
|
||||
|
||||
if (CHIP_REV_IS_FPGA(sc)) {
|
||||
return (FLR_POLL_CNT * 120);
|
||||
return FLR_POLL_CNT * 120;
|
||||
}
|
||||
|
||||
return FLR_POLL_CNT;
|
||||
|
@ -1839,7 +1839,7 @@ bnx2x_ack_int(struct bnx2x_softc *sc)
|
||||
static inline int
|
||||
func_by_vn(struct bnx2x_softc *sc, int vn)
|
||||
{
|
||||
return (2 * vn + SC_PORT(sc));
|
||||
return 2 * vn + SC_PORT(sc);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1874,7 +1874,7 @@ bnx2x_stats_id(struct bnx2x_fastpath *fp)
|
||||
return fp->cl_id;
|
||||
}
|
||||
|
||||
return (fp->cl_id + SC_PORT(sc) * FP_SB_MAX_E1x);
|
||||
return fp->cl_id + SC_PORT(sc) * FP_SB_MAX_E1x;
|
||||
}
|
||||
|
||||
int bnx2x_init(struct bnx2x_softc *sc);
|
||||
|
@ -89,7 +89,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
RTE_CACHE_LINE_SIZE, socket_id);
|
||||
if (NULL == rxq) {
|
||||
PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
rxq->sc = sc;
|
||||
rxq->mb_pool = mp;
|
||||
@ -121,7 +121,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
if (NULL == dma) {
|
||||
PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
|
||||
bnx2x_rx_queue_release(rxq);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
|
||||
rxq->rx_ring = (uint64_t*)dma->addr;
|
||||
@ -142,7 +142,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
if (NULL == rxq->sw_ring) {
|
||||
PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
|
||||
bnx2x_rx_queue_release(rxq);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Initialize software ring entries */
|
||||
@ -153,7 +153,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
|
||||
(unsigned)rxq->queue_id, idx);
|
||||
bnx2x_rx_queue_release(rxq);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
rxq->sw_ring[idx] = mbuf;
|
||||
rxq->rx_ring[idx] = mbuf->buf_physaddr;
|
||||
@ -169,7 +169,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
|
||||
if (NULL == dma) {
|
||||
PMD_RX_LOG(ERR, "RCQ alloc failed");
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
|
||||
rxq->cq_ring = (union eth_rx_cqe*)dma->addr;
|
||||
@ -278,7 +278,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
|
||||
RTE_CACHE_LINE_SIZE);
|
||||
if (txq == NULL)
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
txq->sc = sc;
|
||||
|
||||
txq->nb_tx_pages = 1;
|
||||
@ -302,7 +302,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
|
||||
if (tz == NULL) {
|
||||
bnx2x_tx_queue_release(txq);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
|
||||
txq->tx_ring = (union eth_tx_bd_types *) tz->addr;
|
||||
@ -314,7 +314,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
RTE_CACHE_LINE_SIZE);
|
||||
if (txq->sw_ring == NULL) {
|
||||
bnx2x_tx_queue_release(txq);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
|
||||
|
@ -59,7 +59,7 @@ bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset)
|
||||
val = (uint8_t)(*((volatile uint8_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)));
|
||||
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x", (unsigned long)offset, val);
|
||||
|
||||
return (val);
|
||||
return val;
|
||||
}
|
||||
|
||||
uint16_t
|
||||
@ -75,7 +75,7 @@ bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset)
|
||||
val = (uint16_t)(*((volatile uint16_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)));
|
||||
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x", (unsigned long)offset, val);
|
||||
|
||||
return (val);
|
||||
return val;
|
||||
}
|
||||
|
||||
uint32_t
|
||||
@ -92,5 +92,5 @@ bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset)
|
||||
val = (uint32_t)(*((volatile uint32_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)));
|
||||
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x", (unsigned long)offset, val);
|
||||
|
||||
return (val);
|
||||
return val;
|
||||
}
|
||||
|
@ -1234,7 +1234,7 @@ static uint8_t elink_is_4_port_mode(struct bnx2x_softc *sc)
|
||||
port4mode_ovwr_val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
|
||||
if (port4mode_ovwr_val & (1 << 0)) {
|
||||
/* Return 4-port mode override value */
|
||||
return ((port4mode_ovwr_val & (1 << 1)) == (1 << 1));
|
||||
return (port4mode_ovwr_val & (1 << 1)) == (1 << 1);
|
||||
}
|
||||
/* Return 4-port mode from input pin */
|
||||
return (uint8_t) REG_RD(sc, MISC_REG_PORT4MODE_EN);
|
||||
|
@ -480,7 +480,7 @@ ether_hash(struct ether_hdr *eth_hdr)
|
||||
static inline uint32_t
|
||||
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
|
||||
{
|
||||
return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);
|
||||
return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
|
@ -200,7 +200,7 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
|
||||
|
||||
static inline bool is_x_1g_port(const struct link_config *lc)
|
||||
{
|
||||
return ((lc->supported & FW_PORT_CAP_SPEED_1G) != 0);
|
||||
return (lc->supported & FW_PORT_CAP_SPEED_1G) != 0;
|
||||
}
|
||||
|
||||
static inline bool is_x_10g_port(const struct link_config *lc)
|
||||
|
@ -272,7 +272,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
|
||||
"failed to init HW",
|
||||
eth_dev->data->port_id, pci_dev->id.vendor_id,
|
||||
pci_dev->id.device_id);
|
||||
return -(ENODEV);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Allocate memory for storing MAC addresses */
|
||||
@ -282,7 +282,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
|
||||
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
|
||||
"store MAC addresses",
|
||||
ETHER_ADDR_LEN * hw->mac.rar_entry_count);
|
||||
return -(ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Copy the permanent MAC address */
|
||||
@ -299,7 +299,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
|
||||
rte_intr_callback_register(&(pci_dev->intr_handle),
|
||||
eth_em_interrupt_handler, (void *)eth_dev);
|
||||
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -431,11 +431,11 @@ em_hw_init(struct e1000_hw *hw)
|
||||
PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
|
||||
"SOL/IDER session");
|
||||
}
|
||||
return (0);
|
||||
return 0;
|
||||
|
||||
error:
|
||||
em_hw_control_release(hw);
|
||||
return (diag);
|
||||
return diag;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -448,7 +448,7 @@ eth_em_configure(struct rte_eth_dev *dev)
|
||||
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
|
||||
PMD_INIT_FUNC_TRACE();
|
||||
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -536,7 +536,7 @@ eth_em_start(struct rte_eth_dev *dev)
|
||||
/* Initialize the hardware */
|
||||
if (em_hardware_init(hw)) {
|
||||
PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
|
||||
return (-EIO);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
|
||||
@ -662,14 +662,14 @@ eth_em_start(struct rte_eth_dev *dev)
|
||||
|
||||
PMD_INIT_LOG(DEBUG, "<<");
|
||||
|
||||
return (0);
|
||||
return 0;
|
||||
|
||||
error_invalid_config:
|
||||
PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
|
||||
dev->data->dev_conf.link_speed,
|
||||
dev->data->dev_conf.link_duplex, dev->data->port_id);
|
||||
em_dev_clear_queues(dev);
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*********************************************************************
|
||||
@ -802,9 +802,9 @@ em_hardware_init(struct e1000_hw *hw)
|
||||
|
||||
diag = e1000_init_hw(hw);
|
||||
if (diag < 0)
|
||||
return (diag);
|
||||
return diag;
|
||||
e1000_check_for_link(hw);
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* This function is based on em_update_stats_counters() in e1000/if_em.c */
|
||||
@ -972,14 +972,14 @@ em_get_max_pktlen(const struct e1000_hw *hw)
|
||||
case e1000_82574:
|
||||
case e1000_80003es2lan: /* 9K Jumbo Frame size */
|
||||
case e1000_82583:
|
||||
return (0x2412);
|
||||
return 0x2412;
|
||||
case e1000_pchlan:
|
||||
return (0x1000);
|
||||
return 0x1000;
|
||||
/* Adapters that do not support jumbo frames */
|
||||
case e1000_ich8lan:
|
||||
return (ETHER_MAX_LEN);
|
||||
return ETHER_MAX_LEN;
|
||||
default:
|
||||
return (MAX_JUMBO_FRAME_SIZE);
|
||||
return MAX_JUMBO_FRAME_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1360,7 +1360,7 @@ eth_em_interrupt_setup(struct rte_eth_dev *dev)
|
||||
E1000_READ_REG(hw, E1000_ICR);
|
||||
regval = E1000_READ_REG(hw, E1000_IMS);
|
||||
E1000_WRITE_REG(hw, E1000_IMS, regval | E1000_ICR_LSC);
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1552,7 +1552,7 @@ eth_em_led_on(struct rte_eth_dev *dev)
|
||||
struct e1000_hw *hw;
|
||||
|
||||
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
||||
return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
|
||||
return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1561,7 +1561,7 @@ eth_em_led_off(struct rte_eth_dev *dev)
|
||||
struct e1000_hw *hw;
|
||||
|
||||
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
||||
return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
|
||||
return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1633,7 +1633,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
|
||||
(fc_conf->high_water < fc_conf->low_water)) {
|
||||
PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
|
||||
PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
|
||||
@ -1663,7 +1663,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
|
||||
}
|
||||
|
||||
PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
|
||||
return (-EIO);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -85,7 +85,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
|
||||
|
||||
m = __rte_mbuf_raw_alloc(mp);
|
||||
__rte_mbuf_sanity_check_raw(m, 0);
|
||||
return (m);
|
||||
return m;
|
||||
}
|
||||
|
||||
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
|
||||
@ -312,10 +312,10 @@ what_ctx_update(struct em_tx_queue *txq, uint64_t flags,
|
||||
if (likely (txq->ctx_cache.flags == flags &&
|
||||
((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
|
||||
txq->ctx_cache.cmp_mask) == 0))
|
||||
return (EM_CTX_0);
|
||||
return EM_CTX_0;
|
||||
|
||||
/* Mismatch */
|
||||
return (EM_CTX_NUM);
|
||||
return EM_CTX_NUM;
|
||||
}
|
||||
|
||||
/* Reset transmit descriptors after they have been used */
|
||||
@ -373,7 +373,7 @@ em_xmit_cleanup(struct em_tx_queue *txq)
|
||||
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
|
||||
|
||||
/* No Error */
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
@ -385,7 +385,7 @@ tx_desc_cksum_flags_to_upper(uint64_t ol_flags)
|
||||
|
||||
tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
|
||||
tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
|
||||
return (tmp);
|
||||
return tmp;
|
||||
}
|
||||
|
||||
uint16_t
|
||||
@ -493,7 +493,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
|
||||
if (em_xmit_cleanup(txq) != 0) {
|
||||
/* Could not clean any descriptors */
|
||||
if (nb_tx == 0)
|
||||
return (0);
|
||||
return 0;
|
||||
goto end_of_tx;
|
||||
}
|
||||
}
|
||||
@ -630,7 +630,7 @@ end_of_tx:
|
||||
E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
|
||||
txq->tx_tail = tx_id;
|
||||
|
||||
return (nb_tx);
|
||||
return nb_tx;
|
||||
}
|
||||
|
||||
/*********************************************************************
|
||||
@ -659,7 +659,7 @@ rx_desc_error_to_pkt_flags(uint32_t rx_error)
|
||||
pkt_flags |= PKT_RX_IP_CKSUM_BAD;
|
||||
if (rx_error & E1000_RXD_ERR_TCPE)
|
||||
pkt_flags |= PKT_RX_L4_CKSUM_BAD;
|
||||
return (pkt_flags);
|
||||
return pkt_flags;
|
||||
}
|
||||
|
||||
uint16_t
|
||||
@ -833,7 +833,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
|
||||
nb_hold = 0;
|
||||
}
|
||||
rxq->nb_rx_hold = nb_hold;
|
||||
return (nb_rx);
|
||||
return nb_rx;
|
||||
}
|
||||
|
||||
uint16_t
|
||||
@ -1078,7 +1078,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
|
||||
nb_hold = 0;
|
||||
}
|
||||
rxq->nb_rx_hold = nb_hold;
|
||||
return (nb_rx);
|
||||
return nb_rx;
|
||||
}
|
||||
|
||||
#define EM_MAX_BUF_SIZE 16384
|
||||
@ -1234,19 +1234,19 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
|
||||
RTE_CACHE_LINE_SIZE, socket_id);
|
||||
if (tz == NULL)
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
/* Allocate the tx queue data structure. */
|
||||
if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
|
||||
RTE_CACHE_LINE_SIZE)) == NULL)
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
/* Allocate software ring */
|
||||
if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
|
||||
sizeof(txq->sw_ring[0]) * nb_desc,
|
||||
RTE_CACHE_LINE_SIZE)) == NULL) {
|
||||
em_tx_queue_release(txq);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
txq->nb_tx_desc = nb_desc;
|
||||
@ -1268,7 +1268,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
em_reset_tx_queue(txq);
|
||||
|
||||
dev->data->tx_queues[queue_idx] = txq;
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1335,7 +1335,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
if (nb_desc % EM_RXD_ALIGN != 0 ||
|
||||
(nb_desc > E1000_MAX_RING_DESC) ||
|
||||
(nb_desc < E1000_MIN_RING_DESC)) {
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1344,7 +1344,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
if (rx_conf->rx_drop_en) {
|
||||
PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
|
||||
"device");
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Free memory prior to re-allocation if needed. */
|
||||
@ -1358,19 +1358,19 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
|
||||
RTE_CACHE_LINE_SIZE, socket_id);
|
||||
if (rz == NULL)
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
/* Allocate the RX queue data structure. */
|
||||
if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
|
||||
RTE_CACHE_LINE_SIZE)) == NULL)
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
/* Allocate software ring. */
|
||||
if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
|
||||
sizeof (rxq->sw_ring[0]) * nb_desc,
|
||||
RTE_CACHE_LINE_SIZE)) == NULL) {
|
||||
em_rx_queue_release(rxq);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
rxq->mb_pool = mp;
|
||||
@ -1395,7 +1395,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
dev->data->rx_queues[queue_idx] = rxq;
|
||||
em_reset_rx_queue(rxq);
|
||||
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t
|
||||
@ -1546,12 +1546,12 @@ em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz)
|
||||
i++) {
|
||||
if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
|
||||
*bufsz = bufsz_to_rctl[i].bufsz;
|
||||
return (bufsz_to_rctl[i].rctl);
|
||||
return bufsz_to_rctl[i].rctl;
|
||||
}
|
||||
}
|
||||
|
||||
/* Should never happen. */
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1572,7 +1572,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
|
||||
if (mbuf == NULL) {
|
||||
PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
|
||||
"queue_id=%hu", rxq->queue_id);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
|
||||
|
@ -791,7 +791,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
|
||||
err_late:
|
||||
igb_hw_control_release(hw);
|
||||
|
||||
return (error);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1010,7 +1010,7 @@ rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unuse
|
||||
PMD_INIT_FUNC_TRACE();
|
||||
|
||||
rte_eth_driver_register(&rte_igbvf_pmd);
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1146,7 +1146,7 @@ eth_igb_start(struct rte_eth_dev *dev)
|
||||
/* Initialize the hardware */
|
||||
if (igb_hardware_init(hw)) {
|
||||
PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
|
||||
return (-EIO);
|
||||
return -EIO;
|
||||
}
|
||||
adapter->stopped = 0;
|
||||
|
||||
@ -1289,14 +1289,14 @@ eth_igb_start(struct rte_eth_dev *dev)
|
||||
|
||||
PMD_INIT_LOG(DEBUG, "<<");
|
||||
|
||||
return (0);
|
||||
return 0;
|
||||
|
||||
error_invalid_config:
|
||||
PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
|
||||
dev->data->dev_conf.link_speed,
|
||||
dev->data->dev_conf.link_duplex, dev->data->port_id);
|
||||
igb_dev_clear_queues(dev);
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*********************************************************************
|
||||
@ -1489,13 +1489,13 @@ igb_hardware_init(struct e1000_hw *hw)
|
||||
|
||||
diag = e1000_init_hw(hw);
|
||||
if (diag < 0)
|
||||
return (diag);
|
||||
return diag;
|
||||
|
||||
E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
|
||||
e1000_get_phy_info(hw);
|
||||
e1000_check_for_link(hw);
|
||||
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
|
||||
@ -2510,7 +2510,7 @@ eth_igb_led_on(struct rte_eth_dev *dev)
|
||||
struct e1000_hw *hw;
|
||||
|
||||
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
||||
return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
|
||||
return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -2519,7 +2519,7 @@ eth_igb_led_off(struct rte_eth_dev *dev)
|
||||
struct e1000_hw *hw;
|
||||
|
||||
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
||||
return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
|
||||
return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -2591,7 +2591,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
|
||||
(fc_conf->high_water < fc_conf->low_water)) {
|
||||
PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
|
||||
PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
|
||||
@ -2621,7 +2621,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
|
||||
}
|
||||
|
||||
PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
|
||||
return (-EIO);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
#define E1000_RAH_POOLSEL_SHIFT (18)
|
||||
|
@ -86,7 +86,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
|
||||
|
||||
m = __rte_mbuf_raw_alloc(mp);
|
||||
__rte_mbuf_sanity_check_raw(m, 0);
|
||||
return (m);
|
||||
return m;
|
||||
}
|
||||
|
||||
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
|
||||
@ -366,7 +366,7 @@ what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
|
||||
}
|
||||
|
||||
/* Mismatch, use the previous context */
|
||||
return (IGB_CTX_NUM);
|
||||
return IGB_CTX_NUM;
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
@ -518,7 +518,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
|
||||
*/
|
||||
if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
|
||||
if (nb_tx == 0)
|
||||
return (0);
|
||||
return 0;
|
||||
goto end_of_tx;
|
||||
}
|
||||
|
||||
@ -628,7 +628,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
|
||||
(unsigned) tx_id, (unsigned) nb_tx);
|
||||
txq->tx_tail = tx_id;
|
||||
|
||||
return (nb_tx);
|
||||
return nb_tx;
|
||||
}
|
||||
|
||||
/*********************************************************************
|
||||
@ -944,7 +944,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
|
||||
nb_hold = 0;
|
||||
}
|
||||
rxq->nb_rx_hold = nb_hold;
|
||||
return (nb_rx);
|
||||
return nb_rx;
|
||||
}
|
||||
|
||||
uint16_t
|
||||
@ -1199,7 +1199,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
|
||||
nb_hold = 0;
|
||||
}
|
||||
rxq->nb_rx_hold = nb_hold;
|
||||
return (nb_rx);
|
||||
return nb_rx;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1335,7 +1335,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
|
||||
RTE_CACHE_LINE_SIZE);
|
||||
if (txq == NULL)
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
|
||||
/*
|
||||
* Allocate TX ring hardware descriptors. A memzone large enough to
|
||||
@ -1347,7 +1347,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
E1000_ALIGN, socket_id);
|
||||
if (tz == NULL) {
|
||||
igb_tx_queue_release(txq);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
txq->nb_tx_desc = nb_desc;
|
||||
@ -1371,7 +1371,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
RTE_CACHE_LINE_SIZE);
|
||||
if (txq->sw_ring == NULL) {
|
||||
igb_tx_queue_release(txq);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
|
||||
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
|
||||
@ -1380,7 +1380,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
dev->tx_pkt_burst = eth_igb_xmit_pkts;
|
||||
dev->data->tx_queues[queue_idx] = txq;
|
||||
|
||||
return (0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1453,7 +1453,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
if (nb_desc % IGB_RXD_ALIGN != 0 ||
|
||||
(nb_desc > E1000_MAX_RING_DESC) ||
|
||||
(nb_desc < E1000_MIN_RING_DESC)) {
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Free memory prior to re-allocation if needed */
|
||||
@ -1466,7 +1466,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
|
||||
RTE_CACHE_LINE_SIZE);
|
||||
if (rxq == NULL)
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
rxq->mb_pool = mp;
|
||||
rxq->nb_rx_desc = nb_desc;
|
||||
rxq->pthresh = rx_conf->rx_thresh.pthresh;
|
||||
@ -1493,7 +1493,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
E1000_ALIGN, socket_id);
|
||||
if (rz == NULL) {
|
||||
igb_rx_queue_release(rxq);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
|
||||
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
|
||||
@ -1506,7 +1506,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
RTE_CACHE_LINE_SIZE);
|
||||
if (rxq->sw_ring == NULL) {
|
||||
igb_rx_queue_release(rxq);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
|
||||
rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
|
||||
@ -1967,7 +1967,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
|
||||
if (mbuf == NULL) {
|
||||
PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
|
||||
"queue_id=%hu", rxq->queue_id);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
dma_addr =
|
||||
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
|
||||
|
@ -251,5 +251,5 @@ int enic_clsf_init(struct enic *enic)
|
||||
enic->fdir.hash = rte_hash_create(&hash_params);
|
||||
memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
|
||||
enic->fdir.stats.free = ENICPMD_FDIR_MAX;
|
||||
return (NULL == enic->fdir.hash);
|
||||
return NULL == enic->fdir.hash;
|
||||
}
|
||||
|
@ -1350,12 +1350,12 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
|
||||
|
||||
if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
|
||||
PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (vlan_id > ETH_VLAN_ID_MAX) {
|
||||
PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
vid_idx = FM10K_VFTA_IDX(vlan_id);
|
||||
@ -1367,7 +1367,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
|
||||
if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
|
||||
PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
|
||||
"in the VLAN filter table");
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
fm10k_mbx_lock(hw);
|
||||
@ -1375,7 +1375,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
|
||||
fm10k_mbx_unlock(hw);
|
||||
if (result != FM10K_SUCCESS) {
|
||||
PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
|
||||
return (-EIO);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
|
||||
@ -1396,7 +1396,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
|
||||
}
|
||||
if (result != FM10K_SUCCESS) {
|
||||
PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
|
||||
return (-EIO);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (on) {
|
||||
@ -1579,7 +1579,7 @@ handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
|
||||
rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
|
||||
FM10K_RX_FREE_THRESH_MIN(q),
|
||||
FM10K_RX_FREE_THRESH_DIV(q));
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
q->alloc_thresh = rx_free_thresh;
|
||||
@ -1635,7 +1635,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
|
||||
/* make sure the mempool element size can account for alignment. */
|
||||
if (!mempool_element_size_valid(mp)) {
|
||||
PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* make sure a valid number of descriptors have been requested */
|
||||
@ -1647,7 +1647,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
|
||||
"and a multiple of %u",
|
||||
nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
|
||||
FM10K_MULT_RX_DESC);
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1665,7 +1665,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
|
||||
socket_id);
|
||||
if (q == NULL) {
|
||||
PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* setup queue */
|
||||
@ -1677,7 +1677,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
|
||||
q->tail_ptr = (volatile uint32_t *)
|
||||
&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
|
||||
if (handle_rxconf(q, conf))
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
/* allocate memory for the software ring */
|
||||
q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
|
||||
@ -1686,7 +1686,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
|
||||
if (q->sw_ring == NULL) {
|
||||
PMD_INIT_LOG(ERR, "Cannot allocate software ring");
|
||||
rte_free(q);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1701,7 +1701,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
|
||||
PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
|
||||
rte_free(q->sw_ring);
|
||||
rte_free(q);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
q->hw_ring = mz->addr;
|
||||
q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
|
||||
@ -1753,7 +1753,7 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
|
||||
tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
|
||||
FM10K_TX_FREE_THRESH_MIN(q),
|
||||
FM10K_TX_FREE_THRESH_DIV(q));
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
q->free_thresh = tx_free_thresh;
|
||||
@ -1777,7 +1777,7 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
|
||||
tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
|
||||
FM10K_TX_RS_THRESH_MIN(q),
|
||||
FM10K_TX_RS_THRESH_DIV(q));
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
q->rs_thresh = tx_rs_thresh;
|
||||
@ -1805,7 +1805,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
|
||||
"and a multiple of %u",
|
||||
nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
|
||||
FM10K_MULT_TX_DESC);
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1825,7 +1825,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
|
||||
socket_id);
|
||||
if (q == NULL) {
|
||||
PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* setup queue */
|
||||
@ -1837,7 +1837,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
|
||||
q->tail_ptr = (volatile uint32_t *)
|
||||
&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
|
||||
if (handle_txconf(q, conf))
|
||||
return (-EINVAL);
|
||||
return -EINVAL;
|
||||
|
||||
/* allocate memory for the software ring */
|
||||
q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
|
||||
@ -1846,7 +1846,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
|
||||
if (q->sw_ring == NULL) {
|
||||
PMD_INIT_LOG(ERR, "Cannot allocate software ring");
|
||||
rte_free(q);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1861,7 +1861,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
|
||||
PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
|
||||
rte_free(q->sw_ring);
|
||||
rte_free(q);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
q->hw_ring = mz->addr;
|
||||
q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
|
||||
@ -1878,7 +1878,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
|
||||
PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
|
||||
rte_free(q->sw_ring);
|
||||
rte_free(q);
|
||||
return (-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dev->data->tx_queues[queue_id] = q;
|
||||
|
@ -3413,7 +3413,7 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
pool->num_free -= valid_entry->len;
pool->num_alloc += valid_entry->len;

return (valid_entry->base + pool->base);
return valid_entry->base + pool->base;
}

/**

@ -656,7 +656,7 @@ i40e_calc_itr_interval(int16_t interval)
interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;

/* Convert to hardware count, as writing each 1 represents 2 us */
return (interval / 2);
return interval / 2;
}

#define I40E_VALID_FLOW(flow_type) \

@ -416,7 +416,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
args->ops, info.ops);

return (err | info.result);
return err | info.result;
}

/*

@ -1483,7 +1483,7 @@ i40e_calc_context_desc(uint64_t flags)
mask |= PKT_TX_IEEE1588_TMST;
#endif

return ((flags & mask) ? 1 : 0);
return (flags & mask) ? 1 : 0;
}

/* set i40e TSO context descriptor */
@ -2147,7 +2147,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (!rxq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
"rx queue data structure");
return (-ENOMEM);
return -ENOMEM;
}
rxq->mp = mp;
rxq->nb_rx_desc = nb_desc;
@ -2174,7 +2174,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (!rz) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
return (-ENOMEM);
return -ENOMEM;
}

/* Zero all the descriptors in the ring. */
@ -2198,7 +2198,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (!rxq->sw_ring) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
return (-ENOMEM);
return -ENOMEM;
}

i40e_reset_rx_queue(rxq);
@ -2437,7 +2437,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
if (!txq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
"tx queue structure");
return (-ENOMEM);
return -ENOMEM;
}

/* Allocate TX hardware ring descriptors. */
@ -2448,7 +2448,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
if (!tz) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
return (-ENOMEM);
return -ENOMEM;
}

txq->nb_tx_desc = nb_desc;
@ -2481,7 +2481,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
if (!txq->sw_ring) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
return (-ENOMEM);
return -ENOMEM;
}

i40e_reset_tx_queue(txq);

@ -268,7 +268,7 @@ ixgbe_bypass_get_media_type(struct ixgbe_hw *hw)
} else {
media_type = ixgbe_get_media_type_82599(hw);
}
return (media_type);
return media_type;
}

/*
@ -310,5 +310,5 @@ ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
hw->mac.ops.flap_tx_laser = NULL;
}

return (rc);
return rc;
}

@ -153,7 +153,7 @@ ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state)
*/
*state = (by_ctl >> BYPASS_STATUS_OFF_SHIFT) & BYPASS_STATUS_OFF_MASK;

return (ret_val);
return ret_val;
}

@ -1323,7 +1323,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
*/
if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
return (diag);
return diag;
}

/* negotiate mailbox API version to use with the PF. */
@ -1374,7 +1374,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)

default:
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
return (-EIO);
return -EIO;
}

PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
@ -1478,7 +1478,7 @@ rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unus
PMD_INIT_FUNC_TRACE();

rte_eth_driver_register(&rte_ixgbevf_pmd);
return (0);
return 0;
}

static int
@ -2162,7 +2162,7 @@ skip_link_setup:

ixgbe_restore_statistics_mapping(dev);

return (0);
return 0;

error:
PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
@ -3248,7 +3248,7 @@ ixgbe_dev_led_on(struct rte_eth_dev *dev)
struct ixgbe_hw *hw;

hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
}

static int
@ -3257,7 +3257,7 @@ ixgbe_dev_led_off(struct rte_eth_dev *dev)
struct ixgbe_hw *hw;

hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
}

static int
@ -3339,7 +3339,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
return (-EINVAL);
return -EINVAL;
}

hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
@ -3561,7 +3561,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
(pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
return (-EINVAL);
return -EINVAL;
}

hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
@ -4026,7 +4026,7 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
return (-1);
return -1;
}

return 0;
@ -4083,7 +4083,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,

/* The UTA table only exists on 82599 hardware and newer */
if (hw->mac.type < ixgbe_mac_82599EB)
return (-ENOTSUP);
return -ENOTSUP;

vector = ixgbe_uta_vector(hw,mac_addr);
uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
@ -4126,7 +4126,7 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)

/* The UTA table only exists on 82599 hardware and newer */
if (hw->mac.type < ixgbe_mac_82599EB)
return (-ENOTSUP);
return -ENOTSUP;

if(on) {
for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
@ -4175,10 +4175,10 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
if (hw->mac.type == ixgbe_mac_82598EB) {
PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
" on 82599 hardware and newer");
return (-ENOTSUP);
return -ENOTSUP;
}
if (ixgbe_vmdq_mode_check(hw) < 0)
return (-ENOTSUP);
return -ENOTSUP;

val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);

@ -4203,7 +4203,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

if (ixgbe_vmdq_mode_check(hw) < 0)
return (-ENOTSUP);
return -ENOTSUP;

addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
reg = IXGBE_READ_REG(hw, addr);
@ -4230,7 +4230,7 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

if (ixgbe_vmdq_mode_check(hw) < 0)
return (-ENOTSUP);
return -ENOTSUP;

addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
reg = IXGBE_READ_REG(hw, addr);
@ -4256,7 +4256,7 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

if (ixgbe_vmdq_mode_check(hw) < 0)
return (-ENOTSUP);
return -ENOTSUP;
for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
if (pool_mask & ((uint64_t)(1ULL << pool_idx)))
ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
@ -4422,7 +4422,7 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));

if (ixgbe_vmdq_mode_check(hw) < 0)
return (-ENOTSUP);
return -ENOTSUP;

memset(&mr_info->mr_conf[rule_id], 0,
sizeof(struct rte_eth_mirror_conf));

@ -94,7 +94,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)

m = __rte_mbuf_raw_alloc(mp);
__rte_mbuf_sanity_check_raw(m, 0);
return (m);
return m;
}

@ -468,7 +468,7 @@ what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
}

/* Mismatch, use the previous context */
return (IXGBE_CTX_NUM);
return IXGBE_CTX_NUM;
}

static inline uint32_t
@ -561,7 +561,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);

/* No Error */
return (0);
return 0;
}

uint16_t
@ -683,7 +683,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
if (ixgbe_xmit_cleanup(txq) != 0) {
/* Could not clean any descriptors */
if (nb_tx == 0)
return (0);
return 0;
goto end_of_tx;
}

@ -712,7 +712,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* descriptors
*/
if (nb_tx == 0)
return (0);
return 0;
goto end_of_tx;
}
}
@ -870,7 +870,7 @@ end_of_tx:
IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;

return (nb_tx);
return nb_tx;
}

/*********************************************************************
@ -1136,7 +1136,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
rxq->rx_free_thresh);
if (unlikely(diag != 0))
return (-ENOMEM);
return -ENOMEM;

rxdp = &rxq->rx_ring[alloc_idx];
for (i = 0; i < rxq->rx_free_thresh; ++i) {
@ -1458,7 +1458,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
return (nb_rx);
return nb_rx;
}

/**
@ -2068,7 +2068,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL)
return (-ENOMEM);
return -ENOMEM;

/*
* Allocate TX ring hardware descriptors. A memzone large enough to
@ -2080,7 +2080,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
IXGBE_ALIGN, socket_id);
if (tz == NULL) {
ixgbe_tx_queue_release(txq);
return (-ENOMEM);
return -ENOMEM;
}

txq->nb_tx_desc = nb_desc;
@ -2117,7 +2117,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq->sw_ring == NULL) {
ixgbe_tx_queue_release(txq);
return (-ENOMEM);
return -ENOMEM;
}
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
@ -2130,7 +2130,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
dev->data->tx_queues[queue_idx] = txq;

return (0);
return 0;
}

/**
@ -2347,7 +2347,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
(nb_desc > IXGBE_MAX_RING_DESC) ||
(nb_desc < IXGBE_MIN_RING_DESC)) {
return (-EINVAL);
return -EINVAL;
}

/* Free memory prior to re-allocation if needed... */
@ -2360,7 +2360,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq == NULL)
return (-ENOMEM);
return -ENOMEM;
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
@ -2382,7 +2382,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
RX_RING_SZ, IXGBE_ALIGN, socket_id);
if (rz == NULL) {
ixgbe_rx_queue_release(rxq);
return (-ENOMEM);
return -ENOMEM;
}

/*
@ -2439,7 +2439,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq->sw_ring) {
ixgbe_rx_queue_release(rxq);
return (-ENOMEM);
return -ENOMEM;
}

/*
@ -2456,7 +2456,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq->sw_sc_ring) {
ixgbe_rx_queue_release(rxq);
return (-ENOMEM);
return -ENOMEM;
}

PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
@ -3584,7 +3584,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
(unsigned) rxq->queue_id);
return (-ENOMEM);
return -ENOMEM;
}

rte_mbuf_refcnt_set(mbuf, 1);

@ -573,7 +573,7 @@ priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
/* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
* has been requested. */
if (priv->promisc_req)
return (type == HASH_RXQ_FLOW_TYPE_PROMISC);
return type == HASH_RXQ_FLOW_TYPE_PROMISC;
switch (type) {
case HASH_RXQ_FLOW_TYPE_PROMISC:
return !!priv->promisc_req;

@ -178,7 +178,7 @@ log2above(unsigned int v)

for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
r |= (v & 1);
return (l + r);
return l + r;
}

#endif /* RTE_PMD_MLX5_UTILS_H_ */

@ -361,8 +361,8 @@ static inline int
mpipe_link_compare(struct rte_eth_link *link1,
struct rte_eth_link *link2)
{
return ((*(uint64_t *)link1 == *(uint64_t *)link2)
? -1 : 0);
return (*(uint64_t *)link1 == *(uint64_t *)link2)
? -1 : 0;
}

static int

@ -1275,7 +1275,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
(nb_desc > NFP_NET_MAX_RX_DESC) ||
(nb_desc < NFP_NET_MIN_RX_DESC)) {
RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
return (-EINVAL);
return -EINVAL;
}

/*
@ -1291,7 +1291,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq == NULL)
return (-ENOMEM);
return -ENOMEM;

/* Hw queues mapping based on firmware confifguration */
rxq->qidx = queue_idx;
@ -1328,7 +1328,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
if (tz == NULL) {
RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
nfp_net_rx_queue_release(rxq);
return (-ENOMEM);
return -ENOMEM;
}

/* Saving physical and virtual addresses for the RX ring */
@ -1341,7 +1341,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq->rxbufs == NULL) {
nfp_net_rx_queue_release(rxq);
return (-ENOMEM);
return -ENOMEM;
}

PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
@ -1379,7 +1379,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
if (mbuf == NULL) {
RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
(unsigned)rxq->qidx);
return (-ENOMEM);
return -ENOMEM;
}

dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
@ -1457,7 +1457,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL) {
RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
return (-ENOMEM);
return -ENOMEM;
}

/*
@ -1471,7 +1471,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (tz == NULL) {
RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
nfp_net_tx_queue_release(txq);
return (-ENOMEM);
return -ENOMEM;
}

txq->tx_count = nb_desc;
@ -1499,7 +1499,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq->txbufs == NULL) {
nfp_net_tx_queue_release(txq);
return (-ENOMEM);
return -ENOMEM;
}
PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
txq->txbufs, txq->txds, (unsigned long int)txq->dma);

@ -329,7 +329,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
}
if (vq == NULL) {
PMD_INIT_LOG(ERR, "%s: Can not allocate virtqueue", __func__);
return (-ENOMEM);
return -ENOMEM;
}
if (queue_type == VTNET_RQ && vq->sw_ring == NULL) {
PMD_INIT_LOG(ERR, "%s: Can not allocate RX soft ring",
@ -1229,7 +1229,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)

if (rxmode->hw_ip_checksum) {
PMD_DRV_LOG(ERR, "HW IP checksum not supported");
return (-EINVAL);
return -EINVAL;
}

hw->vlan_strip = rxmode->hw_vlan_strip;

@ -306,7 +306,7 @@ legacy_virtio_has_msix(const struct rte_pci_addr *loc)
if (d)
closedir(d);

return (d != NULL);
return d != NULL;
}

/* Extract I/O port numbers from sysfs */

@ -92,7 +92,7 @@ vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
static inline bool
vmxnet3_cmd_ring_desc_empty(struct vmxnet3_cmd_ring *ring)
{
return (ring->next2comp == ring->next2fill);
return ring->next2comp == ring->next2fill;
}

typedef struct vmxnet3_comp_ring {

@ -133,7 +133,7 @@ struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq);
static inline int __attribute__((always_inline))
virtqueue_full(const struct virtqueue *vq)
{
return (vq->vq_free_cnt == 0);
return vq->vq_free_cnt == 0;
}

#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))

@ -276,7 +276,7 @@ cpu_core_map_get_n_lcores_linux(void)
if (string == NULL)
return -1;

return (atoi(++string) + 1);
return atoi(++string) + 1;
}

#define FILE_LINUX_CPU_CORE_ID \

@ -380,7 +380,7 @@ pkt4_work(
*pkt2_color = color3_2;
*pkt3_color = color3_3;

return (drop0 | (drop1 << 1) | (drop2 << 2) | (drop3 << 3));
return drop0 | (drop1 << 1) | (drop2 << 2) | (drop3 << 3);
}

PIPELINE_TABLE_AH_HIT_DROP_TIME(fa_table_ah_hit, pkt_work, pkt4_work);

@ -304,7 +304,7 @@ send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint8_t port)
txmb->tail = 0;
}

return (fill);
return fill;
}

/* Enqueue a single packet, and send burst if queue is filled */
@ -335,7 +335,7 @@ send_single_packet(struct rte_mbuf *m, uint8_t port)
if(++txmb->head == len)
txmb->head = 0;

return (0);
return 0;
}

static inline void
@ -561,13 +561,13 @@ parse_flow_num(const char *str, uint32_t min, uint32_t max, uint32_t *val)
errno = 0;
v = strtoul(str, &end, 10);
if (errno != 0 || *end != '\0')
return (-EINVAL);
return -EINVAL;

if (v < min || v > max)
return (-EINVAL);
return -EINVAL;

*val = (uint32_t)v;
return (0);
return 0;
}

static int
@ -583,20 +583,20 @@ parse_flow_ttl(const char *str, uint32_t min, uint32_t max, uint32_t *val)
errno = 0;
v = strtoul(str, &end, 10);
if (errno != 0)
return (-EINVAL);
return -EINVAL;

if (*end != '\0') {
if (strncmp(frmt_sec, end, sizeof(frmt_sec)) == 0)
v *= MS_PER_S;
else if (strncmp(frmt_msec, end, sizeof (frmt_msec)) != 0)
return (-EINVAL);
return -EINVAL;
}

if (v < min || v > max)
return (-EINVAL);
return -EINVAL;

*val = (uint32_t)v;
return (0);
return 0;
}

static int
@ -689,7 +689,7 @@ parse_args(int argc, char **argv)
optarg,
lgopts[option_index].name);
print_usage(prgname);
return (ret);
return ret;
}
}

@ -702,7 +702,7 @@ parse_args(int argc, char **argv)
optarg,
lgopts[option_index].name);
print_usage(prgname);
return (ret);
return ret;
}
}

@ -225,7 +225,7 @@ bitcnt(uint32_t v)
for (n = 0; v != 0; v &= v - 1, n++)
;

return (n);
return n;
}

/**
@ -278,13 +278,13 @@ mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)

/* Create new mbuf for the header. */
if (unlikely ((hdr = rte_pktmbuf_alloc(header_pool)) == NULL))
return (NULL);
return NULL;

/* If requested, then make a new clone packet. */
if (use_clone != 0 &&
unlikely ((pkt = rte_pktmbuf_clone(pkt, clone_pool)) == NULL)) {
rte_pktmbuf_free(hdr);
return (NULL);
return NULL;
}

/* prepend new header */
@ -305,7 +305,7 @@ mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
hdr->ol_flags = pkt->ol_flags;

__rte_mbuf_sanity_check(hdr, 1);
return (hdr);
return hdr;
}

/*
@ -509,7 +509,7 @@ parse_portmask(const char *portmask)
if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
return 0;

return ((uint32_t)pm);
return (uint32_t)pm;
}

static int
@ -523,9 +523,9 @@ parse_nqueue(const char *q_arg)
n = strtoul(q_arg, &end, 0);
if (errno != 0 || end == NULL || *end != '\0' ||
n == 0 || n >= MAX_RX_QUEUE_PER_LCORE)
return (-1);
return -1;

return (n);
return n;
}

/* Parse the argument given in the command line of the application */

@ -377,7 +377,7 @@ ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
init_val = rte_jhash_1word(k->ip_dst, init_val);
init_val = rte_jhash_1word(*p, init_val);
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
return (init_val);
return init_val;
}

static inline uint32_t
@ -420,7 +420,7 @@ ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len, uint32_t init_va
init_val = rte_jhash(k->ip_dst, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
init_val = rte_jhash_1word(*p, init_val);
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
return (init_val);
return init_val;
}

#define IPV4_L3FWD_NUM_ROUTES \

@ -104,7 +104,7 @@ init_mbuf_pools(void)
pktmbuf_pool = rte_pktmbuf_pool_create(PKTMBUF_POOL_NAME, num_mbufs,
MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

return (pktmbuf_pool == NULL); /* 0 on success */
return pktmbuf_pool == NULL; /* 0 on success */
}

/**

@ -196,7 +196,7 @@ smp_parse_args(int argc, char **argv)
ret = optind-1;
optind = 0; /* reset getopt lib */

return (ret);
return ret;
}

/*

@ -215,7 +215,7 @@ netmap_port_open(uint32_t idx)
err = rte_netmap_ioctl(port->fd, NIOCGINFO, &req);
if (err) {
printf("[E] NIOCGINFO ioctl failed (error %d)\n", err);
return (err);
return err;
}

snprintf(req.nr_name, sizeof(req.nr_name), "%s", port->str);
@ -225,7 +225,7 @@ netmap_port_open(uint32_t idx)
err = rte_netmap_ioctl(port->fd, NIOCREGIF, &req);
if (err) {
printf("[E] NIOCREGIF ioctl failed (error %d)\n", err);
return (err);
return err;
}

/* mmap only once. */
@ -235,7 +235,7 @@ netmap_port_open(uint32_t idx)

if (ports.mem == MAP_FAILED) {
printf("[E] NETMAP mmap failed for fd: %d)\n", port->fd);
return (-ENOMEM);
return -ENOMEM;
}

port->nmif = NETMAP_IF(ports.mem, req.nr_offset);
@ -243,7 +243,7 @@ netmap_port_open(uint32_t idx)
port->tx_ring = NETMAP_TXRING(port->nmif, 0);
port->rx_ring = NETMAP_RXRING(port->nmif, 0);

return (0);
return 0;
}

@ -138,10 +138,10 @@ ifname_to_portid(const char *ifname, uint8_t *port)
portid = strtoul(ifname, &endptr, 10);
if (endptr == ifname || *endptr != '\0' ||
portid >= RTE_DIM(ports) || errno != 0)
return (-EINVAL);
return -EINVAL;

*port = (uint8_t)portid;
return (0);
return 0;
}

/**
@ -196,10 +196,10 @@ fd_reserve(void)
;

if (i == RTE_DIM(fd_port))
return (-ENOMEM);
return -ENOMEM;

fd_port[i].port = FD_PORT_RSRV;
return (IDX_TO_FD(i));
return IDX_TO_FD(i);
}

static int32_t
@ -210,7 +210,7 @@ fd_release(int32_t fd)
idx = FD_TO_IDX(fd);

if (!FD_VALID(fd) || (port = fd_port[idx].port) == FD_PORT_FREE)
return (-EINVAL);
return -EINVAL;

/* if we still have a valid port attached, release the port */
if (port < RTE_DIM(ports) && ports[port].fd == idx) {
@ -218,7 +218,7 @@ fd_release(int32_t fd)
}

fd_port[idx].port = FD_PORT_FREE;
return (0);
return 0;
}

static int
@ -228,26 +228,26 @@ check_nmreq(struct nmreq *req, uint8_t *port)
uint8_t portid;

if (req == NULL)
return (-EINVAL);
return -EINVAL;

if (req->nr_version != NETMAP_API) {
req->nr_version = NETMAP_API;
return (-EINVAL);
return -EINVAL;
}

if ((rc = ifname_to_portid(req->nr_name, &portid)) != 0) {
RTE_LOG(ERR, USER1, "Invalid interface name:\"%s\" "
"in NIOCGINFO call\n", req->nr_name);
return (rc);
return rc;
}

if (ports[portid].pool == NULL) {
RTE_LOG(ERR, USER1, "Misconfigured portid %hhu\n", portid);
return (-EINVAL);
return -EINVAL;
}

*port = portid;
return (0);
return 0;
}

/**
@ -268,7 +268,7 @@ ioctl_niocginfo(__rte_unused int fd, void * param)

req = (struct nmreq *)param;
if ((rc = check_nmreq(req, &portid)) != 0)
return (rc);
return rc;

req->nr_tx_rings = (uint16_t)(ports[portid].nr_tx_rings - 1);
req->nr_rx_rings = (uint16_t)(ports[portid].nr_rx_rings - 1);
@ -279,7 +279,7 @@ ioctl_niocginfo(__rte_unused int fd, void * param)
req->nr_memsize = netmap.mem_sz;
req->nr_offset = 0;

return (0);
return 0;
}

static void
@ -315,12 +315,12 @@ netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
if (ports[port].fd < RTE_DIM(fd_port)) {
RTE_LOG(ERR, USER1, "port %hhu already in use by fd: %u\n",
port, IDX_TO_FD(ports[port].fd));
return (-EBUSY);
return -EBUSY;
}
if (fd_port[idx].port != FD_PORT_RSRV) {
RTE_LOG(ERR, USER1, "fd: %u is misconfigured\n",
IDX_TO_FD(idx));
return (-EBUSY);
return -EBUSY;
}

nmif = ports[port].nmif;
@ -330,7 +330,7 @@ netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)

/* only ALL rings supported right now. */
if (req->nr_ringid != 0)
return (-EINVAL);
return -EINVAL;

snprintf(nmif->ni_name, sizeof(nmif->ni_name), "%s", req->nr_name);
nmif->ni_version = req->nr_version;
@ -380,7 +380,7 @@ netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
RTE_LOG(ERR, USER1,
"Couldn't start ethernet device %s (error %d)\n",
req->nr_name, rc);
return (rc);
return rc;
}

/* setup fdi <--> port relationtip. */
@ -390,7 +390,7 @@ netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
req->nr_memsize = netmap.mem_sz;
req->nr_offset = (uintptr_t)nmif - (uintptr_t)netmap.mem;

return (0);
return 0;
}

/**
@ -406,7 +406,7 @@ ioctl_niocregif(int32_t fd, void * param)

req = (struct nmreq *)param;
if ((rc = check_nmreq(req, &portid)) != 0)
return (rc);
return rc;

idx = FD_TO_IDX(fd);

@ -414,7 +414,7 @@ ioctl_niocregif(int32_t fd, void * param)
rc = netmap_regif(req, idx, portid);
rte_spinlock_unlock(&netmap_lock);

return (rc);
return rc;
}

static void
@ -452,7 +452,7 @@ ioctl_niocunregif(int fd)
}

rte_spinlock_unlock(&netmap_lock);
return (rc);
return rc;
}

/**
@ -517,7 +517,7 @@ rx_sync_if(uint32_t port)
rc += r->avail;
}

return (rc);
return rc;
}

/**
@ -531,9 +531,9 @@ ioctl_niocrxsync(int fd)
idx = FD_TO_IDX(fd);
if ((port = fd_port[idx].port) < RTE_DIM(ports) &&
ports[port].fd == idx) {
return (rx_sync_if(fd_port[idx].port));
return rx_sync_if(fd_port[idx].port);
} else {
return (-EINVAL);
return -EINVAL;
}
}

@ -612,7 +612,7 @@ tx_sync_if(uint32_t port)
rc += r->avail;
}

return (rc);
return rc;
}

/**
@ -626,9 +626,9 @@ ioctl_nioctxsync(int fd)
idx = FD_TO_IDX(fd);
if ((port = fd_port[idx].port) < RTE_DIM(ports) &&
ports[port].fd == idx) {
return (tx_sync_if(fd_port[idx].port));
return tx_sync_if(fd_port[idx].port);
} else {
return (-EINVAL);
return -EINVAL;
}
}

@ -659,7 +659,7 @@ rte_netmap_init(const struct rte_netmap_conf *conf)
RTE_CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
RTE_LOG(ERR, USER1, "%s: failed to allocate %zu bytes\n",
__func__, sz);
return (-ENOMEM);
return -ENOMEM;
}

netmap.mem_sz = sz;
@ -681,7 +681,7 @@ rte_netmap_init(const struct rte_netmap_conf *conf)
fd_port[i].port = FD_PORT_FREE;
}

return (0);
return 0;
}

@ -698,7 +698,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
conf->nr_rx_rings > netmap.conf.max_rings) {
RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
__func__, portid);
return (-EINVAL);
return -EINVAL;
}

rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
@ -708,7 +708,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
rx_slots > netmap.conf.max_slots) {
RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
__func__, portid);
return (-EINVAL);
return -EINVAL;
}

ret = rte_eth_dev_configure(portid, conf->nr_rx_rings,
@ -716,7 +716,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)

if (ret < 0) {
RTE_LOG(ERR, USER1, "Couldn't configure port %hhu\n", portid);
return (ret);
return ret;
}

for (i = 0; i < conf->nr_tx_rings; i++) {
@ -728,7 +728,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
"Couldn't configure TX queue %"PRIu16" of "
"port %"PRIu8"\n",
i, portid);
return (ret);
return ret;
}

ret = rte_eth_rx_queue_setup(portid, i, rx_slots,
@ -739,7 +739,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
"Couldn't configure RX queue %"PRIu16" of "
"port %"PRIu8"\n",
i, portid);
return (ret);
return ret;
}
}

@ -754,7 +754,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
ports[portid].tx_burst = conf->tx_burst;
ports[portid].rx_burst = conf->rx_burst;

return (0);
return 0;
}

int
@ -770,7 +770,7 @@ rte_netmap_close(__rte_unused int fd)
errno =-rc;
rc = -1;
}
return (rc);
return rc;
}

int rte_netmap_ioctl(int fd, uint32_t op, void *param)
@ -779,7 +779,7 @@ int rte_netmap_ioctl(int fd, uint32_t op, void *param)

if (!FD_VALID(fd)) {
errno = EBADF;
return (-1);
return -1;
}

switch (op) {
@ -815,7 +815,7 @@ int rte_netmap_ioctl(int fd, uint32_t op, void *param)
ret = 0;
}

return (ret);
return ret;
}

void *
@ -829,7 +829,7 @@ rte_netmap_mmap(void *addr, size_t length,
((flags & MAP_FIXED) != 0 && addr != NULL)) {

errno = EINVAL;
return (MAP_FAILED);
return MAP_FAILED;
}

return (void *)((uintptr_t)netmap.mem + (uintptr_t)offset);
@ -852,7 +852,7 @@ rte_netmap_open(__rte_unused const char *pathname, __rte_unused int flags)
errno = -fd;
fd = -1;
}
return (fd);
return fd;
}

/**

@ -153,7 +153,7 @@ _lthread_queue_create(const char *name)
static inline int __attribute__ ((always_inline))
_lthread_queue_empty(struct lthread_queue *q)
{
return (q->tail == q->head);
return q->tail == q->head;
}

@ -462,10 +462,10 @@ _sched_timer_cb(struct rte_timer *tim, void *arg)
*/
static inline int _lthread_sched_isdone(struct lthread_sched *sched)
{
return ((sched->run_flag == 0) &&
return (sched->run_flag == 0) &&
(_lthread_queue_empty(sched->ready)) &&
(_lthread_queue_empty(sched->pready)) &&
(sched->nb_blocked_threads == 0));
(sched->nb_blocked_threads == 0);
}

/*

@ -112,7 +112,7 @@ app_usage(const char *prgname)

static inline int str_is(const char *str, const char *is)
{
return (strcmp(str, is) == 0);
return strcmp(str, is) == 0;
}

/* returns core mask used by DPDK */

@ -53,7 +53,7 @@ extern struct rte_mempool *mbuf_pool;
static inline int
is_bit_set(int i, unsigned int mask)
{
return ((1 << i) & mask);
return (1 << i) & mask;
}

#endif /* _MAIN_H_ */

@ -911,7 +911,7 @@ gpa_to_hpa(struct vhost_dev *vdev, uint64_t guest_pa,
static inline int __attribute__((always_inline))
ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
{
return (((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0);
return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0;
}

/*
@ -2281,7 +2281,7 @@ alloc_data_ll(uint32_t size)
}
ll_new[i].next = NULL;

return (ll_new);
return ll_new;
}

/*

@ -690,7 +690,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
static inline int __attribute__((always_inline))
ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
{
return (((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0);
return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0;
}

/*
@ -1184,7 +1184,7 @@ alloc_data_ll(uint32_t size)
}
ll_new[i].next = NULL;

return (ll_new);
return ll_new;
}

/*

@ -97,14 +97,14 @@ init_watch(void)
xs = xs_daemon_open();
if (xs == NULL) {
RTE_LOG(ERR, XENHOST, "xs_daemon_open failed\n");
return (-1);
return -1;
}

ret = xs_watch(xs, "/local/domain", "mytoken");
if (ret == 0) {
RTE_LOG(ERR, XENHOST, "%s: xs_watch failed\n", __func__);
xs_daemon_close(xs);
return (-1);
return -1;
}

/* We are notified of read availability on the watch via the file descriptor. */
@ -126,7 +126,7 @@ get_xen_guest(int dom_id)
return guest;
}

return (NULL);
return NULL;
}

@ -1157,8 +1157,8 @@ rule_cmp_wildness(struct rte_acl_build_rule *r1, struct rte_acl_build_rule *r2)
int field_index = r1->config->defs[n].field_index;

if (r1->wildness[field_index] != r2->wildness[field_index])
return (r1->wildness[field_index] -
r2->wildness[field_index]);
return r1->wildness[field_index] -
r2->wildness[field_index];
}
return 0;
}

@ -102,7 +102,7 @@ resolve_priority_neon(uint64_t transition, int n, const struct rte_acl_ctx *ctx,
static inline __attribute__((always_inline)) uint32_t
check_any_match_x4(uint64_t val[])
{
return ((val[0] | val[1] | val[2] | val[3]) & RTE_ACL_NODE_MATCH);
return (val[0] | val[1] | val[2] | val[3]) & RTE_ACL_NODE_MATCH;
}

static inline __attribute__((always_inline)) void

@ -306,7 +306,7 @@ _get_section(struct rte_cfgfile *cfg, const char *sectionname)
int
rte_cfgfile_has_section(struct rte_cfgfile *cfg, const char *sectionname)
{
return (_get_section(cfg, sectionname) != NULL);
return _get_section(cfg, sectionname) != NULL;
}

int
@ -352,5 +352,5 @@ int
rte_cfgfile_has_entry(struct rte_cfgfile *cfg, const char *sectionname,
const char *entryname)
{
return (rte_cfgfile_get_entry(cfg, sectionname, entryname) != NULL);
return rte_cfgfile_get_entry(cfg, sectionname, entryname) != NULL;
}

@ -474,7 +474,7 @@ rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
nb_qpairs, dev->data->dev_id);
return (-EINVAL);
return -EINVAL;
}

if (dev->data->queue_pairs == NULL) { /* first time configuration */
@ -601,7 +601,7 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)

if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
return (-EINVAL);
return -EINVAL;
}

dev = &rte_crypto_devices[dev_id];
@ -609,7 +609,7 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
if (dev->data->dev_started) {
CDEV_LOG_ERR(
"device %d must be stopped to allow configuration", dev_id);
return (-EBUSY);
return -EBUSY;
}

/* Setup new number of queue pairs and reconfigure device. */
@ -643,7 +643,7 @@ rte_cryptodev_start(uint8_t dev_id)

if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
return (-EINVAL);
return -EINVAL;
}

dev = &rte_crypto_devices[dev_id];
@ -755,13 +755,13 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,

if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
return (-EINVAL);
return -EINVAL;
}

dev = &rte_crypto_devices[dev_id];
if (queue_pair_id >= dev->data->nb_queue_pairs) {
CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
return (-EINVAL);
return -EINVAL;
}

if (dev->data->dev_started) {
@ -784,7 +784,7 @@ rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)

if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
return (-ENODEV);
return -ENODEV;
}

if (stats == NULL) {
@ -849,11 +849,11 @@ rte_cryptodev_callback_register(uint8_t dev_id,
struct rte_cryptodev_callback *user_cb;

if (!cb_fn)
return (-EINVAL);
return -EINVAL;

if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
return (-EINVAL);
return -EINVAL;
}

dev = &rte_crypto_devices[dev_id];
@ -880,7 +880,7 @@ rte_cryptodev_callback_register(uint8_t dev_id,
}

rte_spinlock_unlock(&rte_cryptodev_cb_lock);
return ((user_cb == NULL) ? -ENOMEM : 0);
return (user_cb == NULL) ? -ENOMEM : 0;
}

int
@ -893,11 +893,11 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,
struct rte_cryptodev_callback *cb, *next;

if (!cb_fn)
return (-EINVAL);
return -EINVAL;

if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
return (-EINVAL);
return -EINVAL;
}

dev = &rte_crypto_devices[dev_id];

@ -75,5 +75,5 @@ int
eal_cpu_detected(unsigned lcore_id)
{
const unsigned ncpus = eal_get_ncpus();
return (lcore_id < ncpus);
return lcore_id < ncpus;
}

@ -119,7 +119,7 @@ find_heap_max_free_elem(int *s, unsigned align)
}
}

return (len - MALLOC_ELEM_OVERHEAD - align);
return len - MALLOC_ELEM_OVERHEAD - align;
}

static const struct rte_memzone *

@ -109,12 +109,12 @@ rte_atomic16_dec(rte_atomic16_t *v)

static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
return (__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0);
return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
return (__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0);
return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

/*------------------------- 32 bit atomic operations -------------------------*/
@ -198,7 +198,7 @@ static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
: [cnt] "r" (&v->cnt)
: "cc", "xer", "memory");

return (ret == 0);
return ret == 0;
}

static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
@ -216,7 +216,7 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
: [cnt] "r" (&v->cnt)
: "cc", "xer", "memory");

return (ret == 0);
return ret == 0;
}
/*------------------------- 64 bit atomic operations -------------------------*/

@ -387,7 +387,7 @@ static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
: [cnt] "r" (&v->cnt)
: "cc", "xer", "memory");

return (ret == 0);
return ret == 0;
}

static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
@ -405,7 +405,7 @@ static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
: [cnt] "r" (&v->cnt)
: "cc", "xer", "memory");

return (ret == 0);
return ret == 0;
}

static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)

@ -51,7 +51,7 @@ extern "C" {
*/
static inline uint16_t rte_arch_bswap16(uint16_t _x)
{
return ((_x >> 8) | ((_x << 8) & 0xff00));
return (_x >> 8) | ((_x << 8) & 0xff00);
}

/*
@ -61,8 +61,8 @@ static inline uint16_t rte_arch_bswap16(uint16_t _x)
*/
static inline uint32_t rte_arch_bswap32(uint32_t _x)
{
return ((_x >> 24) | ((_x >> 8) & 0xff00) | ((_x << 8) & 0xff0000) |
((_x << 24) & 0xff000000));
return (_x >> 24) | ((_x >> 8) & 0xff00) | ((_x << 8) & 0xff0000) |
((_x << 24) & 0xff000000);
}

/*
@ -73,10 +73,10 @@ static inline uint32_t rte_arch_bswap32(uint32_t _x)
/* 64-bit mode */
static inline uint64_t rte_arch_bswap64(uint64_t _x)
{
return ((_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
return (_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
((_x >> 8) & 0xff000000) | ((_x << 8) & (0xffULL << 32)) |
((_x << 24) & (0xffULL << 40)) |
((_x << 40) & (0xffULL << 48)) | ((_x << 56)));
((_x << 40) & (0xffULL << 48)) | ((_x << 56));
}

#ifndef RTE_FORCE_INTRINSICS

@ -61,7 +61,7 @@ rte_spinlock_unlock(rte_spinlock_t *sl)
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
{
return (__sync_lock_test_and_set(&sl->locked, 1) == 0);
return __sync_lock_test_and_set(&sl->locked, 1) == 0;
}

#endif

@ -118,7 +118,7 @@ static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
return ret != 0;
}

static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
@ -131,7 +131,7 @@ static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
return ret != 0;
}

/*------------------------- 32 bit atomic operations -------------------------*/
@ -192,7 +192,7 @@ static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
return ret != 0;
}

static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
@ -205,7 +205,7 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
return ret != 0;
}
#endif

@ -90,7 +90,7 @@ rte_spinlock_trylock (rte_spinlock_t *sl)
: "[lockval]" (lockval)
: "memory");

return (lockval == 0);
return lockval == 0;
}
#endif

@ -309,7 +309,7 @@ static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v);
#ifdef RTE_FORCE_INTRINSICS
static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
return (__sync_add_and_fetch(&v->cnt, 1) == 0);
return __sync_add_and_fetch(&v->cnt, 1) == 0;
}
#endif

@ -329,7 +329,7 @@ static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v);
#ifdef RTE_FORCE_INTRINSICS
static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
return __sync_sub_and_fetch(&v->cnt, 1) == 0;
}
#endif

@ -562,7 +562,7 @@ static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v);
#ifdef RTE_FORCE_INTRINSICS
static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
return (__sync_add_and_fetch(&v->cnt, 1) == 0);
return __sync_add_and_fetch(&v->cnt, 1) == 0;
}
#endif

@ -582,7 +582,7 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v);
#ifdef RTE_FORCE_INTRINSICS
static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
return __sync_sub_and_fetch(&v->cnt, 1) == 0;
}
#endif

@ -127,7 +127,7 @@ rte_spinlock_trylock (rte_spinlock_t *sl);
static inline int
rte_spinlock_trylock (rte_spinlock_t *sl)
{
return (__sync_lock_test_and_set(&sl->locked,1) == 0);
return __sync_lock_test_and_set(&sl->locked,1) == 0;
}
#endif

@ -175,7 +175,7 @@ rte_lcore_is_enabled(unsigned lcore_id)
struct rte_config *cfg = rte_eal_get_configuration();
if (lcore_id >= RTE_MAX_LCORE)
return 0;
return (cfg->lcore_role[lcore_id] != ROLE_OFF);
return cfg->lcore_role[lcore_id] != ROLE_OFF;
}

/**

@ -170,8 +170,8 @@ malloc_elem_free_list_index(size_t size)
index = (log2 - MALLOC_MINSIZE_LOG2 + MALLOC_LOG2_INCREMENT - 1) /
MALLOC_LOG2_INCREMENT;

return (index <= RTE_HEAP_NUM_FREELISTS-1?
index: RTE_HEAP_NUM_FREELISTS-1);
return index <= RTE_HEAP_NUM_FREELISTS-1?
index: RTE_HEAP_NUM_FREELISTS-1;
}

/*

@ -104,9 +104,9 @@ set_trailer(struct malloc_elem *elem)
static inline int
malloc_elem_cookies_ok(const struct malloc_elem *elem)
{
return (elem != NULL &&
return elem != NULL &&
MALLOC_ELEM_HEADER(elem) == MALLOC_HEADER_COOKIE &&
MALLOC_ELEM_TRAILER(elem) == MALLOC_TRAILER_COOKIE);
MALLOC_ELEM_TRAILER(elem) == MALLOC_TRAILER_COOKIE;
}

#endif

@ -87,7 +87,7 @@ check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
check_flag = RTE_MEMZONE_16GB;
}

return (check_flag & flags);
return check_flag & flags;
}

/*

@ -109,8 +109,8 @@ TAILQ_HEAD(rte_ring_list, rte_tailq_entry);
static int
is_ivshmem_device(struct rte_pci_device * dev)
{
return (dev->id.vendor_id == PCI_VENDOR_ID_IVSHMEM
&& dev->id.device_id == PCI_DEVICE_ID_IVSHMEM);
return dev->id.vendor_id == PCI_VENDOR_ID_IVSHMEM
&& dev->id.device_id == PCI_DEVICE_ID_IVSHMEM;
}

static void *

@ -171,7 +171,7 @@ rte_xen_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
mfn = mfn_offset + memseg[memseg_id].mfn[mfn_id];

/** return mechine address */
return (mfn * PAGE_SIZE + phy_addr % PAGE_SIZE);
return mfn * PAGE_SIZE + phy_addr % PAGE_SIZE;
}

int

@ -1628,7 +1628,7 @@ static int igb_get_i2c_data(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);

return ((i2cctl & E1000_I2C_DATA_IN) != 0);
return (i2cctl & E1000_I2C_DATA_IN) != 0;
}

/* igb_set_i2c_data - Sets the I2C data bit
@ -1690,7 +1690,7 @@ static int igb_get_i2c_clk(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);

return ((i2cctl & E1000_I2C_CLK_IN) != 0);
return (i2cctl & E1000_I2C_CLK_IN) != 0;
}

static const struct i2c_algo_bit_data igb_i2c_algo = {
@ -8044,7 +8044,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
igb_lro_flush_all(q_vector);

#endif /* IGB_NO_LRO */
return (total_packets < budget);
return total_packets < budget;
}
#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
/**
@ -8352,7 +8352,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
igb_lro_flush_all(q_vector);

#endif /* IGB_NO_LRO */
return (total_packets < budget);
return total_packets < budget;
}
#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */

@ -2279,7 +2279,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
/* set the correct pool for the new PF MAC address in entry 0 */
ret = ixgbe_add_mac_filter(adapter, hw->mac.addr,
adapter->num_vfs);
return (ret > 0 ? 0 : ret);
return ret > 0 ? 0 : ret;
}

@ -444,8 +444,8 @@ u64
_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
size_t size, int direction)
{
return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
PCI_DRAM_OFFSET);
return ((u64) (page - mem_map) << PAGE_SHIFT) + offset +
PCI_DRAM_OFFSET;
}

#else /* CONFIG_HIGHMEM */

@ -439,7 +439,7 @@ kni_sock_rcvmsg(struct socket *sock,
KNI_DBG_RX("kni_rcvmsg expect_len %ld, flags 0x%08x, pkt_len %d\n",
(unsigned long)len, q->flags, pkt_len);

return (pkt_len + vnet_hdr_len);
return pkt_len + vnet_hdr_len;
}

/* dummy tap like ioctl */

@ -145,7 +145,7 @@ static inline int is_zero_ether_addr(const struct ether_addr *ea)
*/
static inline int is_unicast_ether_addr(const struct ether_addr *ea)
{
return ((ea->addr_bytes[0] & ETHER_GROUP_ADDR) == 0);
return (ea->addr_bytes[0] & ETHER_GROUP_ADDR) == 0;
}

/**
@ -160,7 +160,7 @@ static inline int is_unicast_ether_addr(const struct ether_addr *ea)
*/
static inline int is_multicast_ether_addr(const struct ether_addr *ea)
{
return (ea->addr_bytes[0] & ETHER_GROUP_ADDR);
return ea->addr_bytes[0] & ETHER_GROUP_ADDR;
}

/**
@ -193,7 +193,7 @@ static inline int is_broadcast_ether_addr(const struct ether_addr *ea)
*/
static inline int is_universal_ether_addr(const struct ether_addr *ea)
{
return ((ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) == 0);
return (ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) == 0;
}

/**
@ -208,7 +208,7 @@ static inline int is_universal_ether_addr(const struct ether_addr *ea)
*/
static inline int is_local_admin_ether_addr(const struct ether_addr *ea)
{
return ((ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) != 0);
return (ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) != 0;
}

/**
@ -224,7 +224,7 @@ static inline int is_local_admin_ether_addr(const struct ether_addr *ea)
*/
static inline int is_valid_assigned_ether_addr(const struct ether_addr *ea)
{
return (is_unicast_ether_addr(ea) && (! is_zero_ether_addr(ea)));
return is_unicast_ether_addr(ea) && (! is_zero_ether_addr(ea));
}

/**

@ -44,7 +44,7 @@ rte_hash_k16_cmp_eq(const void *key1, const void *key2, size_t key_len __rte_unu
#else
const __m128i x = _mm_cmpeq_epi32(k1, k2);

return (_mm_movemask_epi8(x) != 0xffff);
return _mm_movemask_epi8(x) != 0xffff;
#endif
}

@ -425,7 +425,7 @@ rte_hash_secondary_hash(const hash_sig_t primary_hash)

uint32_t tag = primary_hash >> all_bits_shift;

return (primary_hash ^ ((tag + 1) * alt_bits_xor));
return primary_hash ^ ((tag + 1) * alt_bits_xor);
}

void
@ -603,7 +603,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
* Return index where key is stored,
* substracting the first dummy index
*/
return (prim_bkt->key_idx[i] - 1);
return prim_bkt->key_idx[i] - 1;
}
}
}
@ -623,7 +623,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
* Return index where key is stored,
* substracting the first dummy index
*/
return (sec_bkt->key_idx[i] - 1);
return sec_bkt->key_idx[i] - 1;
}
}
}
@ -655,7 +655,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
prim_bkt->signatures[ret].current = sig;
prim_bkt->signatures[ret].alt = alt_hash;
prim_bkt->key_idx[ret] = new_idx;
return (new_idx - 1);
return new_idx - 1;
}

/* Error in addition, store new slot back in the ring and return error */
@ -732,7 +732,7 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
* Return index where key is stored,
* substracting the first dummy index
*/
return (bkt->key_idx[i] - 1);
return bkt->key_idx[i] - 1;
}
}
}
@ -755,7 +755,7 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
* Return index where key is stored,
* substracting the first dummy index
*/
return (bkt->key_idx[i] - 1);
return bkt->key_idx[i] - 1;
}
}
}
@ -847,7 +847,7 @@ __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
* Return index where key is stored,
* substracting the first dummy index
*/
return (bkt->key_idx[i] - 1);
return bkt->key_idx[i] - 1;
}
}
}
@ -870,7 +870,7 @@ __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
* Return index where key is stored,
* substracting the first dummy index
*/
return (bkt->key_idx[i] - 1);
return bkt->key_idx[i] - 1;
}
}
}
@ -1239,5 +1239,5 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32
/* Increment iterator */
(*next)++;

return (position - 1);
return position - 1;
}

@ -379,7 +379,7 @@ ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
			IPv6_KEY_BYTES(p1[i].key.src_dst), p1[i].key.id, p1[i].start);

		if (ip_frag_key_cmp(key, &p1[i].key) == 0)
			return (p1 + i);
			return p1 + i;
		else if (ip_frag_key_is_empty(&p1[i].key))
			empty = (empty == NULL) ? (p1 + i) : empty;
		else if (max_cycles + p1[i].start < tms)
@ -405,7 +405,7 @@ ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
			IPv6_KEY_BYTES(p2[i].key.src_dst), p2[i].key.id, p2[i].start);

		if (ip_frag_key_cmp(key, &p2[i].key) == 0)
			return (p2 + i);
			return p2 + i;
		else if (ip_frag_key_is_empty(&p2[i].key))
			empty = (empty == NULL) ?( p2 + i) : empty;
		else if (max_cycles + p2[i].start < tms)

@ -113,7 +113,7 @@ depth_to_range(uint8_t depth)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return (1 << (RTE_LPM_MAX_DEPTH - depth));
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}

/*

@ -1260,7 +1260,7 @@ rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
		uintptr_t off;

		off = (const char *)elt - (const char *)mp->elt_va_start;
		return (mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask));
		return mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask);
	} else {
		/*
		 * If huge pages are disabled, we cannot assume the

@ -1037,7 +1037,7 @@ rte_ring_full(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0);
	return ((cons_tail - prod_tail - 1) & r->prod.mask) == 0;
}

/**
@ -1070,7 +1070,7 @@ rte_ring_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return ((prod_tail - cons_tail) & r->prod.mask);
	return (prod_tail - cons_tail) & r->prod.mask;
}

/**
@ -1086,7 +1086,7 @@ rte_ring_free_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return ((cons_tail - prod_tail - 1) & r->prod.mask);
	return (cons_tail - prod_tail - 1) & r->prod.mask;
}

/**

@ -50,13 +50,13 @@
static inline uint32_t
less(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	return (a*d < b*c);
	return a*d < b*c;
}

static inline uint32_t
less_or_equal(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	return (a*d <= b*c);
	return a*d <= b*c;
}

/* check whether a/b is a valid approximation */

@ -115,7 +115,7 @@ __rte_bitmap_index1_inc(struct rte_bitmap *bmp)
static inline uint64_t
__rte_bitmap_mask1_get(struct rte_bitmap *bmp)
{
	return ((~1lu) << bmp->offset1);
	return (~1lu) << bmp->offset1;
}

static inline void
@ -344,7 +344,7 @@ rte_bitmap_get(struct rte_bitmap *bmp, uint32_t pos)
	index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
	offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK;
	slab2 = bmp->array2 + index2;
	return ((*slab2) & (1lu << offset2));
	return (*slab2) & (1lu << offset2);
}

/**
@ -412,7 +412,7 @@ __rte_bitmap_line_not_empty(uint64_t *slab2)
	v1 |= v2;
	v3 |= v4;

	return (v1 | v3);
	return v1 | v3;
}

/**

@ -163,7 +163,7 @@ static inline uint32_t
rte_fast_rand(void)
{
	rte_red_rand_seed = (214013 * rte_red_rand_seed) + 2531011;
	return (rte_red_rand_seed >> 10);
	return rte_red_rand_seed >> 10;
}

/**

@ -457,7 +457,7 @@ rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
	size0 = sizeof(struct rte_sched_port);
	size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);

	return (size0 + size1);
	return size0 + size1;
}

static void
@ -1057,7 +1057,7 @@ rte_sched_port_queue_is_empty(struct rte_sched_port *port, uint32_t qindex)
{
	struct rte_sched_queue *queue = port->queue + qindex;

	return (queue->qr == queue->qw);
	return queue->qr == queue->qw;
}

#endif /* RTE_SCHED_DEBUG */