eal: add assert macro for debug
The macro RTE_VERIFY always checks a condition; it is optimized with the "unlikely" hint. While this macro is well suited for test applications, libraries and examples should preferably enable such checks only in debug mode. That is why the macro RTE_ASSERT is introduced: it calls RTE_VERIFY only when built with debug logs enabled.

Many assert macros were duplicated across the tree, each enabled by its own specific flag. Removing these #ifdefs makes the guarded code branches easier to test and avoids dead-code pitfalls.

ENA_ASSERT is kept (in debug mode only) because it takes more parameters to log.

Signed-off-by: Thomas Monjalon <thomas.monjalon@6wind.com>
parent 1f49ec153c
commit 50705e8e3c
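Before the per-file changes, a minimal usage sketch of how the two macros divide the work after this patch (not part of the commit; the function name check_mbuf is invented for illustration):

    #include <rte_debug.h>   /* RTE_ASSERT, RTE_VERIFY */
    #include <rte_mbuf.h>

    static void
    check_mbuf(struct rte_mbuf *m)
    {
        /* Always compiled in, even in release builds: suited to
         * test applications where a failed check must abort. */
        RTE_VERIFY(m != NULL);

        /* Compiled out unless RTE_LOG_LEVEL >= RTE_LOG_DEBUG:
         * the preferred form inside libraries, drivers and examples. */
        RTE_ASSERT(rte_mbuf_refcnt_read(m) > 0);
    }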
@@ -193,7 +193,7 @@ Firstly, the Ethernet* header is removed from the packet and the IPv4 address is
     /* Remove the Ethernet header from the input packet */
 
     iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, sizeof(struct ether_hdr));
-    RTE_MBUF_ASSERT(iphdr != NULL);
+    RTE_ASSERT(iphdr != NULL);
     dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
 
 Then, the packet is checked to see if it has a multicast destination address and
@@ -271,7 +271,7 @@ The actual packet transmission is done in the mcast_send_pkt() function:
 
     ethdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, (uint16_t) sizeof(*ethdr));
 
-    RTE_MBUF_ASSERT(ethdr != NULL);
+    RTE_ASSERT(ethdr != NULL);
 
     ether_addr_copy(dest_addr, &ethdr->d_addr);
     ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
@@ -512,7 +512,7 @@ mux_machine(struct bond_dev_private *internals, uint8_t slave_id)
 
	if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
		/* attach mux to aggregator */
-		RTE_VERIFY((port->actor_state & (STATE_COLLECTING |
+		RTE_ASSERT((port->actor_state & (STATE_COLLECTING |
			STATE_DISTRIBUTING)) == 0);
 
		ACTOR_STATE_SET(port, SYNCHRONIZATION);
@@ -813,7 +813,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
			struct lacpdu_header *lacp;
 
			lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
-			RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+			RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
 
			/* This is LACP frame so pass it to rx_machine */
			rx_machine(internals, slave_id, &lacp->lacpdu);
@@ -856,8 +856,9 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
	uint16_t q_id;
 
	/* Given slave mus not be in active list */
-	RTE_VERIFY(find_slave_by_id(internals->active_slaves,
+	RTE_ASSERT(find_slave_by_id(internals->active_slaves,
	internals->active_slave_count, slave_id) == internals->active_slave_count);
+	RTE_SET_USED(internals); /* used only for assert when enabled */
 
	memcpy(&port->actor, &initial, sizeof(struct port_params));
	/* Standard requires that port ID must be grater than 0.
@@ -880,8 +881,8 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
	if (port->mbuf_pool != NULL)
		return;
 
-	RTE_VERIFY(port->rx_ring == NULL);
-	RTE_VERIFY(port->tx_ring == NULL);
+	RTE_ASSERT(port->rx_ring == NULL);
+	RTE_ASSERT(port->tx_ring == NULL);
	socket_id = rte_eth_devices[slave_id].data->numa_node;
 
	element_size = sizeof(struct slow_protocol_frame) + sizeof(struct rte_mbuf)
@@ -939,7 +940,7 @@ bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev,
	uint8_t i;
 
	/* Given slave must be in active list */
-	RTE_VERIFY(find_slave_by_id(internals->active_slaves,
+	RTE_ASSERT(find_slave_by_id(internals->active_slaves,
	internals->active_slave_count, slave_id) < internals->active_slave_count);
 
	/* Exclude slave from transmit policy. If this slave is an aggregator
@@ -95,7 +95,7 @@ activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
		internals->tlb_slaves_order[active_count] = port_id;
	}
 
-	RTE_VERIFY(internals->active_slave_count <
+	RTE_ASSERT(internals->active_slave_count <
			(RTE_DIM(internals->active_slaves) - 1));
 
	internals->active_slaves[internals->active_slave_count] = port_id;
@@ -134,7 +134,7 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
				sizeof(internals->active_slaves[0]));
	}
 
-	RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves));
+	RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
	internals->active_slave_count = active_count;
 
	if (eth_dev->data->dev_started) {
@@ -1608,11 +1608,11 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
		for (i = 0; i < internals->active_slave_count; i++) {
			port = &mode_8023ad_ports[internals->active_slaves[i]];
 
-			RTE_VERIFY(port->rx_ring != NULL);
+			RTE_ASSERT(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
 
-			RTE_VERIFY(port->tx_ring != NULL);
+			RTE_ASSERT(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
		}
@@ -93,14 +93,18 @@ typedef uint64_t dma_addr_t;
 #define ENA_GET_SYSTEM_USECS() \
	(rte_get_timer_cycles() * US_PER_S / rte_get_timer_hz())
 
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
 #define ENA_ASSERT(cond, format, arg...) \
	do { \
		if (unlikely(!(cond))) { \
-			printf("Assertion failed on %s:%s:%d: " format, \
-				__FILE__, __func__, __LINE__, ##arg); \
-			rte_exit(EXIT_FAILURE, "ASSERTION FAILED\n"); \
+			RTE_LOG(ERR, PMD, format, ##arg); \
+			rte_panic("line %d\tassert \"" #cond "\"" \
+				"failed\n", __LINE__); \
		} \
	} while (0)
+#else
+#define ENA_ASSERT(cond, format, arg...) do {} while (0)
+#endif
 
 #define ENA_MAX32(x, y) RTE_MAX((x), (y))
 #define ENA_MAX16(x, y) RTE_MAX((x), (y))
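The ENA hunk above reworks the driver-local macro rather than dropping it, because ENA_ASSERT carries a format string and extra arguments that the single-expression RTE_ASSERT cannot log. A hedged sketch of a call site (the condition and message are invented for illustration):

    ENA_ASSERT(req_id < ring_size,
               "req_id %u out of range (ring size %u)\n",
               req_id, ring_size);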
@@ -155,16 +155,6 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
	return (struct enic *)eth_dev->data->dev_private;
 }
 
-#define RTE_LIBRTE_ENIC_ASSERT_ENABLE
-#ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE
-#define ASSERT(x) do { \
-	if (!(x)) \
-		rte_panic("ENIC: x"); \
-} while (0)
-#else
-#define ASSERT(x)
-#endif
-
 extern void enic_fdir_stats_get(struct enic *enic,
			struct rte_eth_fdir_stats *stats);
 extern int enic_fdir_add_fltr(struct enic *enic,
@@ -238,8 +238,8 @@ static inline uint32_t
 enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
 {
	uint32_t d = i0 + i1;
-	ASSERT(i0 < n_descriptors);
-	ASSERT(i1 < n_descriptors);
+	RTE_ASSERT(i0 < n_descriptors);
+	RTE_ASSERT(i1 < n_descriptors);
	d -= (d >= n_descriptors) ? n_descriptors : 0;
	return d;
 }
@@ -34,14 +34,6 @@
 #ifndef _VMXNET3_ETHDEV_H_
 #define _VMXNET3_ETHDEV_H_
 
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-#define VMXNET3_ASSERT(x) do { \
-	if (!(x)) rte_panic("VMXNET3: %s\n", #x); \
-} while(0)
-#else
-#define VMXNET3_ASSERT(x) do { (void)(x); } while (0)
-#endif
-
 #define VMXNET3_MAX_MAC_ADDRS 1
 
 /* UPT feature to negotiate */
@@ -296,7 +296,7 @@ vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
	struct rte_mbuf *mbuf;
 
	/* Release cmd_ring descriptor and free mbuf */
-	VMXNET3_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
+	RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
 
	mbuf = txq->cmd_ring.buf_info[eop_idx].m;
	if (mbuf == NULL)
@@ -307,7 +307,7 @@ vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
 
	while (txq->cmd_ring.next2comp != eop_idx) {
		/* no out-of-order completion */
-		VMXNET3_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
+		RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
		vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
		completed++;
	}
@@ -454,7 +454,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		if (tso) {
			uint16_t mss = txm->tso_segsz;
 
-			VMXNET3_ASSERT(mss > 0);
+			RTE_ASSERT(mss > 0);
 
			gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
			gdesc->txd.om = VMXNET3_OM_TSO;
@@ -658,12 +658,13 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
		idx = rcd->rxdIdx;
		ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
		rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
+		RTE_SET_USED(rxd); /* used only for assert when enabled */
		rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
 
		PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
 
-		VMXNET3_ASSERT(rcd->len <= rxd->len);
-		VMXNET3_ASSERT(rbi->m);
+		RTE_ASSERT(rcd->len <= rxd->len);
+		RTE_ASSERT(rbi->m);
 
		/* Get the packet buffer pointer from buf_info */
		rxm = rbi->m;
@@ -710,10 +711,10 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
		 * the last mbuf of the current packet.
		 */
		if (rcd->sop) {
-			VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
+			RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
 
			if (unlikely(rcd->len == 0)) {
-				VMXNET3_ASSERT(rcd->eop);
+				RTE_ASSERT(rcd->eop);
 
				PMD_RX_LOG(DEBUG,
					"Rx buf was skipped. rxring[%d][%d])",
@@ -727,7 +728,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
		} else {
			struct rte_mbuf *start = rxq->start_seg;
 
-			VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
+			RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
 
			start->pkt_len += rxm->data_len;
			start->nb_segs++;
@@ -202,7 +202,7 @@ _create_mempool(const char *name, unsigned elt_num, unsigned elt_size,
			obj_init, obj_init_arg,
			socket_id, flags, va, pa_arr, rpg_num, pg_shift);
 
-		RTE_VERIFY(elt_num == mp->size);
+		RTE_ASSERT(elt_num == mp->size);
	}
	mgi.mp = mp;
	mgi.pg_num = rpg_num;
@@ -58,7 +58,7 @@ random_iv_u64(uint64_t *buf, uint16_t n)
	unsigned left = n & 0x7;
	unsigned i;
 
-	IPSEC_ASSERT((n & 0x3) == 0);
+	RTE_ASSERT((n & 0x3) == 0);
 
	for (i = 0; i < (n >> 3); i++)
		buf[i] = rte_rand();
@@ -75,9 +75,9 @@ esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
	int32_t payload_len;
	struct rte_crypto_sym_op *sym_cop;
 
-	IPSEC_ASSERT(m != NULL);
-	IPSEC_ASSERT(sa != NULL);
-	IPSEC_ASSERT(cop != NULL);
+	RTE_ASSERT(m != NULL);
+	RTE_ASSERT(sa != NULL);
+	RTE_ASSERT(cop != NULL);
 
	payload_len = rte_pktmbuf_pkt_len(m) - IP_ESP_HDR_SZ - sa->iv_len -
		sa->digest_len;
@@ -124,9 +124,9 @@ esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
	uint8_t *padding;
	uint16_t i;
 
-	IPSEC_ASSERT(m != NULL);
-	IPSEC_ASSERT(sa != NULL);
-	IPSEC_ASSERT(cop != NULL);
+	RTE_ASSERT(m != NULL);
+	RTE_ASSERT(sa != NULL);
+	RTE_ASSERT(cop != NULL);
 
	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
@@ -165,9 +165,9 @@ esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
	char *padding;
	struct rte_crypto_sym_op *sym_cop;
 
-	IPSEC_ASSERT(m != NULL);
-	IPSEC_ASSERT(sa != NULL);
-	IPSEC_ASSERT(cop != NULL);
+	RTE_ASSERT(m != NULL);
+	RTE_ASSERT(sa != NULL);
+	RTE_ASSERT(cop != NULL);
 
	/* Payload length */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) + 2,
@@ -186,7 +186,7 @@ esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
 
	padding = rte_pktmbuf_append(m, pad_len + sa->digest_len);
 
-	IPSEC_ASSERT(padding != NULL);
+	RTE_ASSERT(padding != NULL);
 
	ip = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
			sa->src, sa->dst);
@@ -238,9 +238,9 @@ esp4_tunnel_outbound_post_crypto(struct rte_mbuf *m __rte_unused,
		struct ipsec_sa *sa __rte_unused,
		struct rte_crypto_op *cop)
 {
-	IPSEC_ASSERT(m != NULL);
-	IPSEC_ASSERT(sa != NULL);
-	IPSEC_ASSERT(cop != NULL);
+	RTE_ASSERT(m != NULL);
+	RTE_ASSERT(sa != NULL);
+	RTE_ASSERT(cop != NULL);
 
	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
@@ -49,13 +49,13 @@ ip4ip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t src, uint32_t dst)
 
	inip = rte_pktmbuf_mtod(m, struct ip*);
 
-	IPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
+	RTE_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
 
	offset += sizeof(struct ip);
 
	outip = (struct ip *)rte_pktmbuf_prepend(m, offset);
 
-	IPSEC_ASSERT(outip != NULL);
+	RTE_ASSERT(outip != NULL);
 
	/* Per RFC4301 5.1.2.1 */
	outip->ip_v = IPVERSION;
@@ -83,14 +83,14 @@ ip4ip_inbound(struct rte_mbuf *m, uint32_t offset)
 
	outip = rte_pktmbuf_mtod(m, struct ip*);
 
-	IPSEC_ASSERT(outip->ip_v == IPVERSION);
+	RTE_ASSERT(outip->ip_v == IPVERSION);
 
	offset += sizeof(struct ip);
	inip = (struct ip *)rte_pktmbuf_adj(m, offset);
-	IPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
+	RTE_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
 
	/* Check packet is still bigger than IP header (inner) */
-	IPSEC_ASSERT(rte_pktmbuf_pkt_len(m) > sizeof(struct ip));
+	RTE_ASSERT(rte_pktmbuf_pkt_len(m) > sizeof(struct ip));
 
	/* RFC4301 5.1.2.1 Note 6 */
	if ((inip->ip_tos & htons(IPTOS_ECN_ECT0 | IPTOS_ECN_ECT1)) &&
@@ -117,7 +117,7 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
		sa = sas[i];
		priv->sa = sa;
 
-		IPSEC_ASSERT(sa != NULL);
+		RTE_ASSERT(sa != NULL);
 
		priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
 
@@ -139,7 +139,7 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
			continue;
		}
 
-		IPSEC_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
+		RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
		enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
	}
 
@@ -166,7 +166,7 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
		priv = get_priv(pkt);
		sa = priv->sa;
 
-		IPSEC_ASSERT(sa != NULL);
+		RTE_ASSERT(sa != NULL);
 
		ret = sa->post_crypto(pkt, sa, cops[j]);
		if (unlikely(ret))
@@ -47,15 +47,6 @@
 #define MAX_PKT_BURST 32
 #define MAX_QP_PER_LCORE 256
 
-#ifdef IPSEC_DEBUG
-#define IPSEC_ASSERT(exp) \
-if (!(exp)) { \
-	rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-#else
-#define IPSEC_ASSERT(exp) do {} while (0)
-#endif /* IPSEC_DEBUG */
-
 #define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */
 
 #define uint32_t_to_char(ip, a, b, c, d) do {\
@@ -321,7 +321,7 @@ mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr,
 
	/* Construct Ethernet header. */
	ethdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, (uint16_t)sizeof(*ethdr));
-	RTE_MBUF_ASSERT(ethdr != NULL);
+	RTE_ASSERT(ethdr != NULL);
 
	ether_addr_copy(dest_addr, &ethdr->d_addr);
	ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
@@ -353,7 +353,7 @@ mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
 
	/* Remove the Ethernet header from the input packet */
	iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
-	RTE_MBUF_ASSERT(iphdr != NULL);
+	RTE_ASSERT(iphdr != NULL);
 
	dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
 
@@ -143,7 +143,7 @@ struct lthread_stack *_stack_alloc(void)
	struct lthread_stack *s;
 
	s = _lthread_objcache_alloc((THIS_SCHED)->stack_cache);
-	LTHREAD_ASSERT(s != NULL);
+	RTE_ASSERT(s != NULL);
 
	s->root_sched = THIS_SCHED;
	s->stack_size = LTHREAD_MAX_STACK_SIZE;
@@ -197,16 +197,4 @@ struct lthread {
	uint64_t diag_ref; /* ref to user diag data */
 } __rte_cache_aligned;
 
-/*
- * Assert
- */
-#if LTHREAD_DIAG
-#define LTHREAD_ASSERT(expr) do { \
-	if (!(expr)) \
-		rte_panic("line%d\tassert \"" #expr "\" failed\n", __LINE__);\
-} while (0)
-#else
-#define LTHREAD_ASSERT(expr) do {} while (0)
-#endif
-
 #endif /* LTHREAD_INT_H */
@@ -170,7 +170,6 @@ int lthread_mutex_lock(struct lthread_mutex *m)
		_suspend();
		/* resumed, must loop and compete for the lock again */
	}
-	LTHREAD_ASSERT(0);
	return 0;
 }
 
@@ -231,7 +230,7 @@ int lthread_mutex_unlock(struct lthread_mutex *m)
		if (unblocked != NULL) {
			rte_atomic64_dec(&m->count);
			DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
-			LTHREAD_ASSERT(unblocked->sched != NULL);
+			RTE_ASSERT(unblocked->sched != NULL);
			_ready_queue_insert((struct lthread_sched *)
					unblocked->sched, unblocked);
			break;
@@ -138,14 +138,14 @@ _qnode_pool_create(const char *name, int prealloc_size) {
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());
 
-	LTHREAD_ASSERT(p);
+	RTE_ASSERT(p);
 
	p->stub = rte_malloc_socket(NULL,
				sizeof(struct qnode),
				RTE_CACHE_LINE_SIZE,
				rte_socket_id());
 
-	LTHREAD_ASSERT(p->stub);
+	RTE_ASSERT(p->stub);
 
	if (name != NULL)
		strncpy(p->name, name, LT_MAX_NAME_SIZE);
@@ -129,7 +129,7 @@ _lthread_queue_create(const char *name)
 
	/* allocated stub node */
	stub = _qnode_alloc();
-	LTHREAD_ASSERT(stub);
+	RTE_ASSERT(stub);
 
	if (name != NULL)
		strncpy(new_queue->name, name, sizeof(new_queue->name));
@@ -268,7 +268,7 @@ struct lthread_sched *_lthread_sched_create(size_t stack_size)
	struct lthread_sched *new_sched;
	unsigned lcoreid = rte_lcore_id();
 
-	LTHREAD_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);
+	RTE_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);
 
	if (stack_size == 0)
		stack_size = LTHREAD_MAX_STACK_SIZE;
@@ -94,7 +94,7 @@ void _lthread_key_pool_init(void)
 
	pool = rte_ring_create(name,
			LTHREAD_MAX_KEYS, 0, 0);
-	LTHREAD_ASSERT(pool);
+	RTE_ASSERT(pool);
 
	int i;
 
@@ -240,7 +240,7 @@ void _lthread_tls_alloc(struct lthread *lt)
 
	tls = _lthread_objcache_alloc((THIS_SCHED)->tls_cache);
 
-	LTHREAD_ASSERT(tls != NULL);
+	RTE_ASSERT(tls != NULL);
 
	tls->root_sched = (THIS_SCHED);
	lt->tls = tls;
@@ -43,6 +43,8 @@
  * the implementation is architecture-specific.
  */
 
+#include "rte_log.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -76,8 +78,13 @@ void rte_dump_registers(void);
 #define rte_panic(...) rte_panic_(__func__, __VA_ARGS__, "dummy")
 #define rte_panic_(func, format, ...) __rte_panic(func, format "%.0s", __VA_ARGS__)
 
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#define RTE_ASSERT(exp)	RTE_VERIFY(exp)
+#else
+#define RTE_ASSERT(exp) do {} while (0)
+#endif
 #define RTE_VERIFY(exp) do { \
-	if (!(exp)) \
+	if (unlikely(!(exp))) \
		rte_panic("line %d\tassert \"" #exp "\" failed\n", __LINE__); \
 } while (0)
 
@@ -38,17 +38,9 @@
 
 /* logging macros. */
 #ifdef RTE_LIBRTE_IP_FRAG_DEBUG
-
 #define IP_FRAG_LOG(lvl, fmt, args...) RTE_LOG(lvl, USER1, fmt, ##args)
-
-#define IP_FRAG_ASSERT(exp) \
-if (!(exp)) { \
-	rte_panic("function %s, line%d\tassert \"" #exp "\" failed\n", \
-		__func__, __LINE__); \
-}
 #else
 #define IP_FRAG_LOG(lvl, fmt, args...) do {} while(0)
-#define IP_FRAG_ASSERT(exp) do {} while (0)
 #endif /* IP_FRAG_DEBUG */
 
 #define IPV4_KEYLEN 1
@@ -107,7 +107,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
	frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));
 
	/* Fragment size should be a multiply of 8. */
-	IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
+	RTE_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
 
	in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv4_hdr *);
	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
@@ -110,7 +110,7 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
	frag_size = (uint16_t)(mtu_size - sizeof(struct ipv6_hdr));
 
	/* Fragment size should be a multiple of 8. */
-	IP_FRAG_ASSERT((frag_size & ~RTE_IPV6_EHDR_FO_MASK) == 0);
+	RTE_ASSERT((frag_size & ~RTE_IPV6_EHDR_FO_MASK) == 0);
 
	/* Check that pkts_out is big enough to hold all fragments */
	if (unlikely (frag_size * nb_pkts_out <
@@ -86,7 +86,7 @@ rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
	struct rte_pktmbuf_pool_private default_mbp_priv;
	uint16_t roomsz;
 
-	RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
+	RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
 
	/* if no structure is provided, assume no mbuf private area */
	user_mbp_priv = opaque_arg;
@@ -100,7 +100,7 @@ rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
		user_mbp_priv = &default_mbp_priv;
	}
 
-	RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
+	RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
		user_mbp_priv->mbuf_data_room_size +
		user_mbp_priv->mbuf_priv_size);
 
@@ -126,9 +126,9 @@ rte_pktmbuf_init(struct rte_mempool *mp,
	mbuf_size = sizeof(struct rte_mbuf) + priv_size;
	buf_len = rte_pktmbuf_data_room_size(mp);
 
-	RTE_MBUF_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
-	RTE_MBUF_ASSERT(mp->elt_size >= mbuf_size);
-	RTE_MBUF_ASSERT(buf_len <= UINT16_MAX);
+	RTE_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
+	RTE_ASSERT(mp->elt_size >= mbuf_size);
+	RTE_ASSERT(buf_len <= UINT16_MAX);
 
	memset(m, 0, mp->elt_size);
 
@@ -938,12 +938,6 @@ struct rte_pktmbuf_pool_private {
	rte_mbuf_sanity_check(m, is_h); \
 } while (0)
 
-/** MBUF asserts in debug mode */
-#define RTE_MBUF_ASSERT(exp) \
-if (!(exp)) { \
-	rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
 #else /* RTE_LIBRTE_MBUF_DEBUG */
 
 /** check mbuf type in debug mode */
@@ -952,9 +946,6 @@ if (!(exp)) { \
 /** check mbuf type in debug mode if mbuf pointer is not null */
 #define __rte_mbuf_sanity_check_raw(m, is_h) do { } while (0)
 
-/** MBUF asserts in debug mode */
-#define RTE_MBUF_ASSERT(exp) do { } while (0)
-
 #endif /* RTE_LIBRTE_MBUF_DEBUG */
 
 #ifdef RTE_MBUF_REFCNT_ATOMIC
@@ -1084,7 +1075,7 @@ static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
	if (rte_mempool_get(mp, &mb) < 0)
		return NULL;
	m = (struct rte_mbuf *)mb;
-	RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
	rte_mbuf_refcnt_set(m, 1);
	return m;
 }
@@ -1100,7 +1091,7 @@ static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
 static inline void __attribute__((always_inline))
 __rte_mbuf_raw_free(struct rte_mbuf *m)
 {
-	RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
	rte_mempool_put(m->pool, m);
 }
 
@@ -1388,22 +1379,22 @@ static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	switch (count % 4) {
	case 0:
		while (idx != count) {
-			RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
			rte_mbuf_refcnt_set(mbufs[idx], 1);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
	case 3:
-		RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+		RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
		rte_mbuf_refcnt_set(mbufs[idx], 1);
		rte_pktmbuf_reset(mbufs[idx]);
		idx++;
	case 2:
-		RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+		RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
		rte_mbuf_refcnt_set(mbufs[idx], 1);
		rte_pktmbuf_reset(mbufs[idx]);
		idx++;
	case 1:
-		RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+		RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
		rte_mbuf_refcnt_set(mbufs[idx], 1);
		rte_pktmbuf_reset(mbufs[idx]);
		idx++;
@@ -1431,7 +1422,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
 {
	struct rte_mbuf *md;
 
-	RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(mi) &&
+	RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
	    rte_mbuf_refcnt_read(mi) == 1);
 
	/* if m is not direct, get the mbuf that embeds the data */
@@ -795,8 +795,8 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
	unsigned common_count;
	unsigned cache_count;
 
-	RTE_VERIFY(f != NULL);
-	RTE_VERIFY(mp != NULL);
+	RTE_ASSERT(f != NULL);
+	RTE_ASSERT(mp != NULL);
 
	fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
	fprintf(f, "  flags=%x\n", mp->flags);
@@ -77,7 +77,7 @@ __rte_red_init_tables(void)
 
	scale = 1024.0;
 
-	RTE_RED_ASSERT(RTE_RED_WQ_LOG2_NUM == RTE_DIM(rte_red_log2_1_minus_Wq));
+	RTE_ASSERT(RTE_RED_WQ_LOG2_NUM == RTE_DIM(rte_red_log2_1_minus_Wq));
 
	for (i = RTE_RED_WQ_LOG2_MIN; i <= RTE_RED_WQ_LOG2_MAX; i++) {
		double n = (double)i;
@@ -63,19 +63,6 @@ extern "C" {
 #define RTE_RED_INT16_NBITS (sizeof(uint16_t) * CHAR_BIT)
 #define RTE_RED_WQ_LOG2_NUM (RTE_RED_WQ_LOG2_MAX - RTE_RED_WQ_LOG2_MIN + 1)
 
-#ifdef RTE_RED_DEBUG
-
-#define RTE_RED_ASSERT(exp) \
-if (!(exp)) { \
-	rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
-#else
-
-#define RTE_RED_ASSERT(exp) do { } while(0)
-
-#endif /* RTE_RED_DEBUG */
-
 /**
  * Externs
 *
@@ -246,8 +233,8 @@ rte_red_enqueue_empty(const struct rte_red_config *red_cfg,
 {
	uint64_t time_diff = 0, m = 0;
 
-	RTE_RED_ASSERT(red_cfg != NULL);
-	RTE_RED_ASSERT(red != NULL);
+	RTE_ASSERT(red_cfg != NULL);
+	RTE_ASSERT(red != NULL);
 
	red->count ++;
 
@@ -361,8 +348,8 @@ rte_red_enqueue_nonempty(const struct rte_red_config *red_cfg,
	struct rte_red *red,
	const unsigned q)
 {
-	RTE_RED_ASSERT(red_cfg != NULL);
-	RTE_RED_ASSERT(red != NULL);
+	RTE_ASSERT(red_cfg != NULL);
+	RTE_ASSERT(red != NULL);
 
	/**
	 * EWMA filter (Sally Floyd and Van Jacobson):
@@ -424,8 +411,8 @@ rte_red_enqueue(const struct rte_red_config *red_cfg,
	const unsigned q,
	const uint64_t time)
 {
-	RTE_RED_ASSERT(red_cfg != NULL);
-	RTE_RED_ASSERT(red != NULL);
+	RTE_ASSERT(red_cfg != NULL);
+	RTE_ASSERT(red != NULL);
 
	if (q != 0) {
		return rte_red_enqueue_nonempty(red_cfg, red, q);