common/octeontx: update mbox to version 1.1.3

Sync mailbox data structures to version 1.1.3.
Add mailbox version verification and defer initializing octeontx
devices if the mailbox version mismatches.
Update the OCTEON TX limitations with the maximum usable mempool size.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Reviewed-by: Jerin Jacob <jerinj@marvell.com>
Authored by Pavan Nikhilesh on 2019-11-20 09:18:02 +05:30; committed by Jerin Jacob
parent 24252a60ad
commit b4134b2d31
10 changed files with 138 additions and 7 deletions


@@ -139,3 +139,10 @@ follows:
When timvf is used as Event timer adapter event schedule type
``RTE_SCHED_TYPE_PARALLEL`` is not supported.
Max mempool size
~~~~~~~~~~~~~~~~
Max mempool size when using OCTEON TX Eventdev (SSO) should be limited to 128K.
When running dpdk-test-eventdev on OCTEON TX the application can limit the
number of mbufs by using the option ``--pool_sz 131072``.
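For illustration, a dpdk-test-eventdev invocation that respects this limit might look as follows (the core list, vdev and test name here are placeholders, not part of this patch):

    dpdk-test-eventdev -l 0-3 --vdev="event_octeontx" -- --test=perf_queue --pool_sz 131072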


@@ -174,3 +174,10 @@ The OCTEON TX SoC family NICs support a maximum of a 32K jumbo frame. The value
is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
member of ``struct rte_eth_conf`` is set to a value lower than 32k, frames
up to 32k bytes can still reach the host interface.
Maximum mempool size
~~~~~~~~~~~~~~~~~~~~
The maximum mempool size supplied to Rx queue setup should be less than 128K.
When running testpmd on OCTEON TX the application can limit the number of mbufs
by using the option ``--total-num-mbufs=131072``.
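Likewise, a testpmd run staying within the limit could be launched as (core list is a placeholder):

    testpmd -l 0-3 -- --total-num-mbufs=131072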


@@ -31,6 +31,7 @@ enum {
struct mbox {
int init_once;
uint8_t ready;
uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
uint16_t tag_own; /* Last tag which was written to own channel */
@@ -59,6 +60,13 @@ struct mbox_ram_hdr {
};
};
/* MBOX interface version message */
struct mbox_intf_ver {
uint32_t platform:12;
uint32_t major:10;
uint32_t minor:10;
};
int octeontx_logtype_mbox;
RTE_INIT(otx_init_log)
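The new mbox_intf_ver message packs platform/major/minor into one 32-bit word exchanged with the kernel. As a minimal standalone sketch (not part of the patch), this is how version 1.1.3 encodes under GCC on a little-endian target, where the first bit-field lands in the least significant bits:

#include <stdint.h>
#include <stdio.h>

struct mbox_intf_ver {
	uint32_t platform:12;
	uint32_t major:10;
	uint32_t minor:10;
};

int main(void)
{
	/* Bit-field order is implementation-defined; little-endian GCC
	 * puts platform in bits 0-11, major in 12-21, minor in 22-31. */
	union {
		struct mbox_intf_ver v;
		uint32_t raw;
	} u = { .v = { .platform = 0x01, .major = 0x01, .minor = 0x03 } };

	printf("1.1.3 encodes as 0x%08x\n", (unsigned)u.raw); /* 0x00c01001 */
	return 0;
}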
@@ -247,3 +255,92 @@ octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
}
static int
octeontx_start_domain(void)
{
struct octeontx_mbox_hdr hdr = {0};
int result = -EINVAL;
hdr.coproc = NO_COPROC;
hdr.msg = RM_START_APP;
result = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (result != 0) {
mbox_log_err("Could not start domain. Err=%d. FuncErr=%d\n",
result, hdr.res_code);
result = -EINVAL;
}
return result;
}
static int
octeontx_check_mbox_version(struct mbox_intf_ver app_intf_ver,
struct mbox_intf_ver *intf_ver)
{
struct mbox_intf_ver kernel_intf_ver = {0};
struct octeontx_mbox_hdr hdr = {0};
int result = 0;
hdr.coproc = NO_COPROC;
hdr.msg = RM_INTERFACE_VERSION;
result = octeontx_mbox_send(&hdr, &app_intf_ver, sizeof(app_intf_ver),
&kernel_intf_ver, sizeof(kernel_intf_ver));
if (result != sizeof(kernel_intf_ver)) {
mbox_log_err("Could not send interface version. Err=%d. FuncErr=%d\n",
result, hdr.res_code);
result = -EINVAL;
}
if (intf_ver)
*intf_ver = kernel_intf_ver;
if (app_intf_ver.platform != kernel_intf_ver.platform ||
app_intf_ver.major != kernel_intf_ver.major ||
app_intf_ver.minor != kernel_intf_ver.minor)
result = -EINVAL;
return result;
}
int
octeontx_mbox_init(void)
{
const struct mbox_intf_ver MBOX_INTERFACE_VERSION = {
.platform = 0x01,
.major = 0x01,
.minor = 0x03
};
struct mbox_intf_ver rm_intf_ver = {0};
struct mbox *m = &octeontx_mbox;
int ret;
if (m->ready)
return 0;
ret = octeontx_start_domain();
if (ret < 0) {
m->init_once = 0;
return ret;
}
ret = octeontx_check_mbox_version(MBOX_INTERFACE_VERSION,
&rm_intf_ver);
if (ret < 0) {
mbox_log_err("MBOX version: Kernel(%d.%d.%d) != DPDK(%d.%d.%d)",
rm_intf_ver.platform, rm_intf_ver.major,
rm_intf_ver.minor, MBOX_INTERFACE_VERSION.platform,
MBOX_INTERFACE_VERSION.major,
MBOX_INTERFACE_VERSION.minor);
m->init_once = 0;
return -EINVAL;
}
m->ready = 1;
rte_mb();
return 0;
}
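For context, a hedged sketch of how a driver probe path consumes this API (the function name and comments are illustrative, not from the patch):

/* Hypothetical caller: defer device init when the handshake fails. */
static int
example_octeontx_probe(void)
{
	int ret;

	ret = octeontx_mbox_init(); /* starts the domain, checks 1.1.3 */
	if (ret < 0)
		return ret; /* version mismatch with kernel: defer device */

	/* ... continue coprocessor setup over the now-ready mailbox ... */
	return 0;
}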


@@ -11,6 +11,11 @@
#define SSOW_BAR4_LEN (64 * 1024)
#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
#define NO_COPROC 0x0
#define RM_START_APP 0x1
#define RM_INTERFACE_VERSION 0x2
#define MBOX_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, octeontx_logtype_mbox,\
"%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
@@ -26,9 +31,11 @@ struct octeontx_mbox_hdr {
uint16_t vfid; /* VF index or pf resource index local to the domain */
uint8_t coproc; /* Coprocessor id */
uint8_t msg; /* Message id */
uint8_t oob; /* out of band data */
uint8_t res_code; /* Functional layer response code */
};
int octeontx_mbox_init(void);
int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base);
int octeontx_mbox_set_reg(uint8_t *reg);
int octeontx_mbox_send(struct octeontx_mbox_hdr *hdr,


@@ -2,6 +2,7 @@ DPDK_20.0 {
global:
octeontx_logtype_mbox;
octeontx_mbox_init;
octeontx_mbox_send;
octeontx_mbox_set_ram_mbox_base;
octeontx_mbox_set_reg;


@@ -77,6 +77,7 @@ ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
}
struct ssovf_mbox_grp_pri {
uint8_t vhgrp_id;
uint8_t wgt_left; /* Read only */
uint8_t weight;
uint8_t affinity;
@@ -95,6 +96,7 @@ ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
hdr.msg = SSO_GRP_SET_PRIORITY;
hdr.vfid = queue;
grp.vhgrp_id = queue;
grp.weight = 0xff;
grp.affinity = 0xff;
grp.priority = prio / 32; /* Normalize to 0 to 7 */
@@ -433,7 +435,7 @@ ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
pki_qos.mmask.f_grptag_ok = 1;
pki_qos.mmask.f_grptag_bad = 1;
-pki_qos.tag_type = queue_conf->ev.sched_type;
+pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type;
pki_qos.qos_entry.port_add = 0;
pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
@@ -780,6 +782,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
return 0;
}
octeontx_mbox_init();
ret = ssovf_info(&oinfo);
if (ret) {
ssovf_log_err("Failed to probe and validate ssovfs %d", ret);


@@ -19,8 +19,6 @@
#define SSO_MAX_VHGRP (64)
#define SSO_MAX_VHWS (32)
#define SSO_VHGRP_AQ_THR (0x1E0ULL)
struct ssovf_res {
uint16_t domain;
uint16_t vfid;


@@ -507,6 +507,7 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
RTE_SET_USED(node_id);
RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
octeontx_mbox_init();
object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
if (object_size > FPA_MAX_OBJ_SIZE) {
errno = EINVAL;


@@ -27,6 +27,7 @@
#define MBOX_BGX_PORT_SET_BP 11
#define MBOX_BGX_PORT_SET_BCAST 12
#define MBOX_BGX_PORT_SET_MCAST 13
#define MBOX_BGX_PORT_SET_MTU 14
/* BGX port configuration parameters: */
typedef struct octeontx_mbox_bgx_port_conf {
@@ -51,6 +52,8 @@ typedef struct octeontx_mbox_bgx_port_conf {
typedef struct octeontx_mbox_bgx_port_status {
uint8_t link_up;
uint8_t bp;
uint8_t duplex;
uint32_t speed;
} octeontx_mbox_bgx_port_status_t;
/* BGX port statistics: */
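As a quick usage sketch of the extended status structure above (hypothetical helper; it prints raw field values, since units and encodings are defined by the RM interface rather than this patch):

#include <stdio.h>

static void
bgx_dump_status(const octeontx_mbox_bgx_port_status_t *sts)
{
	/* duplex and speed are the fields added in mbox 1.1.3 */
	printf("link_up=%u bp=%u duplex=%u speed=%u\n",
	       sts->link_up, sts->bp, sts->duplex, sts->speed);
}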


@@ -33,6 +33,9 @@
#define MBOX_PKI_PORT_RESET_STATS 18
#define MBOX_PKI_GET_PORT_CONFIG 19
#define MBOX_PKI_GET_PORT_QOS_CONFIG 20
#define MBOX_PKI_PORT_ALLOC_QPG 21
#define MBOX_PKI_PORT_FREE_QPG 22
#define MBOX_PKI_SET_PORT_CONFIG 23
#define MBOX_PKI_MAX_QOS_ENTRY 64
@@ -64,6 +67,7 @@ typedef struct mbox_pki_port_cfg {
struct {
uint8_t fcs_pres:1;
uint8_t fcs_skip:1;
uint8_t inst_skip:1;
uint8_t parse_mode:1;
uint8_t mpls_parse:1;
uint8_t inst_hdr_parse:1;
@@ -74,6 +78,7 @@ typedef struct mbox_pki_port_cfg {
} mmask;
uint8_t fcs_pres;
uint8_t fcs_skip;
uint8_t inst_skip;
uint8_t parse_mode;
uint8_t mpls_parse;
uint8_t inst_hdr_parse;
@@ -189,6 +194,9 @@ struct mbox_pki_qos_entry {
uint16_t gaura;
uint8_t grptag_ok;
uint8_t grptag_bad;
uint8_t ena_red;
uint8_t ena_drop;
uint8_t tag_type;
};
/* pki flow/style enable qos */
@@ -201,7 +209,7 @@ typedef struct mbox_pki_port_create_qos {
struct mbox_pki_qos_entry qos_entry[MBOX_PKI_MAX_QOS_ENTRY];
} mbox_pki_qos_cfg_t;
-/* pki flow/style enable qos */
+/* pki flow/style modify qos */
typedef struct mbox_pki_port_modify_qos_entry {
uint8_t port_type;
uint16_t index;
@@ -214,11 +222,10 @@ typedef struct mbox_pki_port_modify_qos_entry {
uint8_t f_grptag_bad:1;
uint8_t f_tag_type:1;
} mmask;
-uint8_t tag_type;
struct mbox_pki_qos_entry qos_entry;
} mbox_pki_mod_qos_t;
-/* pki flow/style enable qos */
+/* pki flow/style delete qos */
typedef struct mbox_pki_port_delete_qos_entry {
uint8_t port_type;
uint16_t index;
@@ -372,6 +379,7 @@ struct pki_qos_entry {
uint8_t grptag_bad;
uint8_t ena_red;
uint8_t ena_drop;
uint8_t tag_type;
};
#define PKO_MAX_QOS_ENTRY 64
@@ -405,7 +413,6 @@ typedef struct pki_port_modify_qos_entry {
uint8_t f_grptag_bad:1;
uint8_t f_tag_type:1;
} mmask;
-uint8_t tag_type;
struct pki_qos_entry qos_entry;
} pki_mod_qos_t;
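Since tag_type moved from the modify-QoS wrapper into the embedded qos_entry, callers now set it there, matching the ssovf_eth_rx_adapter_queue_add change earlier in this commit. A minimal sketch (helper name and sched_type parameter are illustrative):

#include <string.h>

static void
example_fill_mod_qos(pki_mod_qos_t *qos, uint16_t index, uint8_t sched_type)
{
	memset(qos, 0, sizeof(*qos));
	qos->index = index;
	qos->mmask.f_tag_type = 1;
	/* before mbox 1.1.3 this lived at the top level as qos->tag_type */
	qos->qos_entry.tag_type = sched_type;
}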