net/mlx4: split the definitions to the header file

Make the priv_lock()/priv_unlock() functions and several other structs/defines visible
to other source files by moving them into the mlx4.h header.

Signed-off-by: Vasily Philipov <vasilyf@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Author: Vasily Philipov
Date: 2017-03-05 09:51:31 +02:00
Committed by: Ferruh Yigit
parent 3fca2ab597
commit 58610cdde8
2 changed files with 189 additions and 191 deletions
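
For illustration only (not part of this commit): a minimal sketch, using a hypothetical helper in a separate compilation unit, of how code outside mlx4.c can take the control-path lock now that priv_lock()/priv_unlock() and struct priv are visible through mlx4.h.

/* Hypothetical helper in another source file (illustration only). */
#include "mlx4.h"

static void
example_toggle_promisc(struct priv *priv, int enable)
{
	priv_lock(priv);          /* serialize control-path access */
	priv->promisc = !!enable; /* field guarded by priv->lock */
	priv_unlock(priv);
}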

drivers/net/mlx4/mlx4.c

@@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
* Copyright 2012-2015 6WIND S.A.
* Copyright 2012 Mellanox.
* Copyright 2012-2017 6WIND S.A.
* Copyright 2012-2017 Mellanox.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -58,20 +58,6 @@
#include <linux/sockios.h>
#include <fcntl.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
@@ -86,9 +72,6 @@
#include <rte_log.h>
#include <rte_alarm.h>
#include <rte_memory.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
/* Generated configuration header. */
#include "mlx4_autoconf.h"
@@ -96,21 +79,6 @@
/* PMD header. */
#include "mlx4.h"
/* Runtime logging through RTE_LOG() is enabled when not in debugging mode.
* Intermediate LOG_*() macros add the required end-of-line characters. */
#ifndef NDEBUG
#define INFO(...) DEBUG(__VA_ARGS__)
#define WARN(...) DEBUG(__VA_ARGS__)
#define ERROR(...) DEBUG(__VA_ARGS__)
#else
#define LOG__(level, m, ...) \
RTE_LOG(level, PMD, MLX4_DRIVER_NAME ": " m "%c", __VA_ARGS__)
#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
#define INFO(...) LOG_(INFO, __VA_ARGS__)
#define WARN(...) LOG_(WARNING, __VA_ARGS__)
#define ERROR(...) LOG_(ERR, __VA_ARGS__)
#endif
/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
#define DATA_LEN(m) ((m)->data_len)
@@ -137,157 +105,6 @@ typedef union {
(((val) & (from)) / ((from) / (to))) : \
(((val) & (from)) * ((to) / (from))))
struct mlx4_rxq_stats {
unsigned int idx; /**< Mapping index. */
#ifdef MLX4_PMD_SOFT_COUNTERS
uint64_t ipackets; /**< Total of successfully received packets. */
uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
uint64_t idropped; /**< Total of packets dropped when RX ring full. */
uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};
struct mlx4_txq_stats {
unsigned int idx; /**< Mapping index. */
#ifdef MLX4_PMD_SOFT_COUNTERS
uint64_t opackets; /**< Total of successfully sent packets. */
uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
uint64_t odropped; /**< Total of packets not sent when TX ring full. */
};
/* RX element (scattered packets). */
struct rxq_elt_sp {
struct ibv_recv_wr wr; /* Work Request. */
struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
struct rte_mbuf *bufs[MLX4_PMD_SGE_WR_N]; /* SGEs buffers. */
};
/* RX element. */
struct rxq_elt {
struct ibv_recv_wr wr; /* Work Request. */
struct ibv_sge sge; /* Scatter/Gather Element. */
/* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
};
/* RX queue descriptor. */
struct rxq {
struct priv *priv; /* Back pointer to private data. */
struct rte_mempool *mp; /* Memory Pool for allocations. */
struct ibv_mr *mr; /* Memory Region (for mp). */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
struct ibv_exp_cq_family *if_cq; /* CQ interface. */
/*
* Each VLAN ID requires a separate flow steering rule.
*/
BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
struct ibv_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES][MLX4_MAX_VLAN_IDS];
struct ibv_flow *promisc_flow; /* Promiscuous flow. */
struct ibv_flow *allmulti_flow; /* Multicast flow. */
unsigned int port_id; /* Port ID for incoming packets. */
unsigned int elts_n; /* (*elts)[] length. */
unsigned int elts_head; /* Current index in (*elts)[]. */
union {
struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
struct rxq_elt (*no_sp)[]; /* RX elements. */
} elts;
unsigned int sp:1; /* Use scattered RX elements. */
unsigned int csum:1; /* Enable checksum offloading. */
unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
struct mlx4_rxq_stats stats; /* RX queue counters. */
unsigned int socket; /* CPU socket ID for allocations. */
struct ibv_exp_res_domain *rd; /* Resource Domain. */
};
/* TX element. */
struct txq_elt {
struct rte_mbuf *buf;
};
/* Linear buffer type. It is used when transmitting buffers with too many
* segments that do not fit the hardware queue (see max_send_sge).
* Extra segments are copied (linearized) in such buffers, replacing the
* last SGE during TX.
* The size is arbitrary but large enough to hold a jumbo frame with
* 8 segments considering mbuf.buf_len is about 2048 bytes. */
typedef uint8_t linear_t[16384];
/* TX queue descriptor. */
struct txq {
struct priv *priv; /* Back pointer to private data. */
struct {
const struct rte_mempool *mp; /* Cached Memory Pool. */
struct ibv_mr *mr; /* Memory Region (for mp). */
uint32_t lkey; /* mr->lkey */
} mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
struct ibv_exp_cq_family *if_cq; /* CQ interface. */
#if MLX4_PMD_MAX_INLINE > 0
uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
#endif
unsigned int elts_n; /* (*elts)[] length. */
struct txq_elt (*elts)[]; /* TX elements. */
unsigned int elts_head; /* Current index in (*elts)[]. */
unsigned int elts_tail; /* First element awaiting completion. */
unsigned int elts_comp; /* Number of completion requests. */
unsigned int elts_comp_cd; /* Countdown for next completion request. */
unsigned int elts_comp_cd_init; /* Initial value for countdown. */
struct mlx4_txq_stats stats; /* TX queue counters. */
linear_t (*elts_linear)[]; /* Linearized buffers. */
struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
unsigned int socket; /* CPU socket ID for allocations. */
struct ibv_exp_res_domain *rd; /* Resource Domain. */
};
struct priv {
struct rte_eth_dev *dev; /* Ethernet device. */
struct ibv_context *ctx; /* Verbs context. */
struct ibv_device_attr device_attr; /* Device properties. */
struct ibv_pd *pd; /* Protection Domain. */
/*
* MAC addresses array and configuration bit-field.
* An extra entry that cannot be modified by the DPDK is reserved
* for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
*/
struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
/* VLAN filters. */
struct {
unsigned int enabled:1; /* If enabled. */
unsigned int id:12; /* VLAN ID (0-4095). */
} vlan_filter[MLX4_MAX_VLAN_IDS]; /* VLAN filters table. */
/* Device properties. */
uint16_t mtu; /* Configured MTU. */
uint8_t port; /* Physical port number. */
unsigned int started:1; /* Device started, flows enabled. */
unsigned int promisc:1; /* Device in promiscuous mode. */
unsigned int allmulti:1; /* Device receives all multicast packets. */
unsigned int hw_qpg:1; /* QP groups are supported. */
unsigned int hw_tss:1; /* TSS is supported. */
unsigned int hw_rss:1; /* RSS is supported. */
unsigned int hw_csum:1; /* Checksum offload is supported. */
unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
unsigned int rss:1; /* RSS is enabled. */
unsigned int vf:1; /* This is a VF device. */
unsigned int pending_alarm:1; /* An alarm is pending. */
#ifdef INLINE_RECV
unsigned int inl_recv_size; /* Inline recv size */
#endif
unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
/* RX/TX queues. */
struct rxq rxq_parent; /* Parent queue when RSS is enabled. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct rxq *(*rxqs)[]; /* RX queues. */
struct txq *(*txqs)[]; /* TX queues. */
struct rte_intr_handle intr_handle; /* Interrupt handler. */
rte_spinlock_t lock; /* Lock for control functions. */
};
/* Local storage for secondary process data. */
struct mlx4_secondary_data {
struct rte_eth_dev_data data; /* Local device data. */
@@ -335,8 +152,7 @@ mlx4_get_priv(struct rte_eth_dev *dev)
* @param priv
* Pointer to private structure.
*/
static void
priv_lock(struct priv *priv)
void priv_lock(struct priv *priv)
{
rte_spinlock_lock(&priv->lock);
}
@@ -347,8 +163,7 @@ priv_lock(struct priv *priv)
* @param priv
* Pointer to private structure.
*/
static void
priv_unlock(struct priv *priv)
void priv_unlock(struct priv *priv)
{
rte_spinlock_unlock(&priv->lock);
}

drivers/net/mlx4/mlx4.h

@@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
* Copyright 2012-2015 6WIND S.A.
* Copyright 2012 Mellanox.
* Copyright 2012-2017 6WIND S.A.
* Copyright 2012-2017 Mellanox.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -38,6 +38,33 @@
#include <stdint.h>
#include <limits.h>
/*
* Runtime logging through RTE_LOG() is enabled when not in debugging mode.
* Intermediate LOG_*() macros add the required end-of-line characters.
*/
#ifndef NDEBUG
#define INFO(...) DEBUG(__VA_ARGS__)
#define WARN(...) DEBUG(__VA_ARGS__)
#define ERROR(...) DEBUG(__VA_ARGS__)
#else
#define LOG__(level, m, ...) \
RTE_LOG(level, PMD, MLX4_DRIVER_NAME ": " m "%c", __VA_ARGS__)
#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
#define INFO(...) LOG_(INFO, __VA_ARGS__)
#define WARN(...) LOG_(WARNING, __VA_ARGS__)
#define ERROR(...) LOG_(ERR, __VA_ARGS__)
#endif
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
/*
* Maximum number of simultaneous MAC addresses supported.
*
@@ -160,4 +187,160 @@ enum {
#define claim_positive(...) (__VA_ARGS__)
#endif /* NDEBUG */
struct mlx4_rxq_stats {
unsigned int idx; /**< Mapping index. */
#ifdef MLX4_PMD_SOFT_COUNTERS
uint64_t ipackets; /**< Total of successfully received packets. */
uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
uint64_t idropped; /**< Total of packets dropped when RX ring full. */
uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};
/* RX element (scattered packets). */
struct rxq_elt_sp {
struct ibv_recv_wr wr; /* Work Request. */
struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
struct rte_mbuf *bufs[MLX4_PMD_SGE_WR_N]; /* SGEs buffers. */
};
/* RX element. */
struct rxq_elt {
struct ibv_recv_wr wr; /* Work Request. */
struct ibv_sge sge; /* Scatter/Gather Element. */
/* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
};
/* RX queue descriptor. */
struct rxq {
struct priv *priv; /* Back pointer to private data. */
struct rte_mempool *mp; /* Memory Pool for allocations. */
struct ibv_mr *mr; /* Memory Region (for mp). */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
struct ibv_exp_cq_family *if_cq; /* CQ interface. */
/*
* Each VLAN ID requires a separate flow steering rule.
*/
BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
struct ibv_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES][MLX4_MAX_VLAN_IDS];
struct ibv_flow *promisc_flow; /* Promiscuous flow. */
struct ibv_flow *allmulti_flow; /* Multicast flow. */
unsigned int port_id; /* Port ID for incoming packets. */
unsigned int elts_n; /* (*elts)[] length. */
unsigned int elts_head; /* Current index in (*elts)[]. */
union {
struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
struct rxq_elt (*no_sp)[]; /* RX elements. */
} elts;
unsigned int sp:1; /* Use scattered RX elements. */
unsigned int csum:1; /* Enable checksum offloading. */
unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
struct mlx4_rxq_stats stats; /* RX queue counters. */
unsigned int socket; /* CPU socket ID for allocations. */
struct ibv_exp_res_domain *rd; /* Resource Domain. */
};
/* TX element. */
struct txq_elt {
struct rte_mbuf *buf;
};
struct mlx4_txq_stats {
unsigned int idx; /**< Mapping index. */
#ifdef MLX4_PMD_SOFT_COUNTERS
uint64_t opackets; /**< Total of successfully sent packets. */
uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
uint64_t odropped; /**< Total of packets not sent when TX ring full. */
};
/*
* Linear buffer type. It is used when transmitting buffers with too many
* segments that do not fit the hardware queue (see max_send_sge).
* Extra segments are copied (linearized) in such buffers, replacing the
* last SGE during TX.
* The size is arbitrary but large enough to hold a jumbo frame with
* 8 segments considering mbuf.buf_len is about 2048 bytes.
*/
typedef uint8_t linear_t[16384];
/* TX queue descriptor. */
struct txq {
struct priv *priv; /* Back pointer to private data. */
struct {
const struct rte_mempool *mp; /* Cached Memory Pool. */
struct ibv_mr *mr; /* Memory Region (for mp). */
uint32_t lkey; /* mr->lkey */
} mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
struct ibv_exp_cq_family *if_cq; /* CQ interface. */
#if MLX4_PMD_MAX_INLINE > 0
uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
#endif
unsigned int elts_n; /* (*elts)[] length. */
struct txq_elt (*elts)[]; /* TX elements. */
unsigned int elts_head; /* Current index in (*elts)[]. */
unsigned int elts_tail; /* First element awaiting completion. */
unsigned int elts_comp; /* Number of completion requests. */
unsigned int elts_comp_cd; /* Countdown for next completion request. */
unsigned int elts_comp_cd_init; /* Initial value for countdown. */
struct mlx4_txq_stats stats; /* TX queue counters. */
linear_t (*elts_linear)[]; /* Linearized buffers. */
struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
unsigned int socket; /* CPU socket ID for allocations. */
struct ibv_exp_res_domain *rd; /* Resource Domain. */
};
struct priv {
struct rte_eth_dev *dev; /* Ethernet device. */
struct ibv_context *ctx; /* Verbs context. */
struct ibv_device_attr device_attr; /* Device properties. */
struct ibv_pd *pd; /* Protection Domain. */
/*
* MAC addresses array and configuration bit-field.
* An extra entry that cannot be modified by the DPDK is reserved
* for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
*/
struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
/* VLAN filters. */
struct {
unsigned int enabled:1; /* If enabled. */
unsigned int id:12; /* VLAN ID (0-4095). */
} vlan_filter[MLX4_MAX_VLAN_IDS]; /* VLAN filters table. */
/* Device properties. */
uint16_t mtu; /* Configured MTU. */
uint8_t port; /* Physical port number. */
unsigned int started:1; /* Device started, flows enabled. */
unsigned int promisc:1; /* Device in promiscuous mode. */
unsigned int allmulti:1; /* Device receives all multicast packets. */
unsigned int hw_qpg:1; /* QP groups are supported. */
unsigned int hw_tss:1; /* TSS is supported. */
unsigned int hw_rss:1; /* RSS is supported. */
unsigned int hw_csum:1; /* Checksum offload is supported. */
unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
unsigned int rss:1; /* RSS is enabled. */
unsigned int vf:1; /* This is a VF device. */
unsigned int pending_alarm:1; /* An alarm is pending. */
#ifdef INLINE_RECV
unsigned int inl_recv_size; /* Inline recv size */
#endif
unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
/* RX/TX queues. */
struct rxq rxq_parent; /* Parent queue when RSS is enabled. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct rxq *(*rxqs)[]; /* RX queues. */
struct txq *(*txqs)[]; /* TX queues. */
struct rte_intr_handle intr_handle; /* Interrupt handler. */
rte_spinlock_t lock; /* Lock for control functions. */
};
void priv_lock(struct priv *priv);
void priv_unlock(struct priv *priv);
#endif /* RTE_PMD_MLX4_H_ */
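
With the INFO()/WARN()/ERROR() macros also relocated to mlx4.h, any file including the header can emit driver-prefixed log messages. A hypothetical usage sketch (the function and messages below are examples, not part of the commit):

/* Hypothetical usage of the relocated logging macros (illustration only). */
#include "mlx4.h"

static void
example_report_queues(struct priv *priv)
{
	INFO("port %u: %u RX queue(s), %u TX queue(s)",
	     priv->port, priv->rxqs_n, priv->txqs_n);
	if (!priv->started)
		WARN("port %u is not started yet", priv->port);
}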