common/octeontx2: fix mbox memory access

The octeontx2 PMD's mailbox client uses device memory to send messages
to the mailbox server in the admin function (AF) Linux kernel driver.
The device memory used for this mailbox communication must be
qualified as volatile so that the compiler does not coalesce adjacent
accesses into wider, unaligned device memory accesses.

This patch marks the mailbox requests and responses as volatile; they
were previously non-volatile, and the resulting coalesced, unaligned
device memory accesses caused bus errors on Fedora 30 with gcc 9.1.1.
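
For illustration only (not part of this patch), a minimal sketch of the
failure mode and of how the qualifier helps. The struct and function
names below are hypothetical, and __otx2_io is assumed here to expand
to the volatile qualifier:

#include <stdint.h>

/* Assumption for this sketch only: the qualifier expands to volatile. */
#define __otx2_io volatile

/* Hypothetical two-word message laid out in mbox device memory. */
struct hyp_msg {
	uint32_t __otx2_io a;
	uint32_t __otx2_io b;
};

void
hyp_msg_fill(struct hyp_msg *msg)
{
	/*
	 * With the volatile qualifier each store remains a separate
	 * 32-bit access. Without it, the compiler may coalesce the two
	 * stores into a single 64-bit store, which faults when the
	 * device memory address is not 64-bit aligned.
	 */
	msg->a = 1;
	msg->b = 2;
}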

Fixes: 2b71657c86 ("common/octeontx2: add mbox request and response definition")

Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
---
 4 files changed, 29 insertions(+), 29 deletions(-)

@@ -553,16 +553,16 @@ struct npa_aq_enq_req {
* LF fills the pool_id in aura.pool_addr. AF will translate
* the pool_id to pool context pointer.
*/
-struct npa_aura_s aura;
+__otx2_io struct npa_aura_s aura;
/* Valid when op == WRITE/INIT and ctype == POOL */
-struct npa_pool_s pool;
+__otx2_io struct npa_pool_s pool;
};
/* Mask data when op == WRITE (1=write, 0=don't write) */
union {
/* Valid when op == WRITE and ctype == AURA */
-struct npa_aura_s aura_mask;
+__otx2_io struct npa_aura_s aura_mask;
/* Valid when op == WRITE and ctype == POOL */
-struct npa_pool_s pool_mask;
+__otx2_io struct npa_pool_s pool_mask;
};
};
@@ -570,9 +570,9 @@ struct npa_aq_enq_rsp {
struct mbox_msghdr hdr;
union {
/* Valid when op == READ and ctype == AURA */
-struct npa_aura_s aura;
+__otx2_io struct npa_aura_s aura;
/* Valid when op == READ and ctype == POOL */
-struct npa_pool_s pool;
+__otx2_io struct npa_pool_s pool;
};
};
@@ -656,39 +656,39 @@ struct nix_aq_enq_req {
uint8_t __otx2_io op;
union {
/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_RQ */
-struct nix_rq_ctx_s rq;
+__otx2_io struct nix_rq_ctx_s rq;
/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_SQ */
-struct nix_sq_ctx_s sq;
+__otx2_io struct nix_sq_ctx_s sq;
/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_CQ */
-struct nix_cq_ctx_s cq;
+__otx2_io struct nix_cq_ctx_s cq;
/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_RSS */
-struct nix_rsse_s rss;
+__otx2_io struct nix_rsse_s rss;
/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_MCE */
-struct nix_rx_mce_s mce;
+__otx2_io struct nix_rx_mce_s mce;
};
/* Mask data when op == WRITE (1=write, 0=don't write) */
union {
/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_RQ */
-struct nix_rq_ctx_s rq_mask;
+__otx2_io struct nix_rq_ctx_s rq_mask;
/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_SQ */
-struct nix_sq_ctx_s sq_mask;
+__otx2_io struct nix_sq_ctx_s sq_mask;
/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_CQ */
-struct nix_cq_ctx_s cq_mask;
+__otx2_io struct nix_cq_ctx_s cq_mask;
/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_RSS */
-struct nix_rsse_s rss_mask;
+__otx2_io struct nix_rsse_s rss_mask;
/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_MCE */
-struct nix_rx_mce_s mce_mask;
+__otx2_io struct nix_rx_mce_s mce_mask;
};
};
struct nix_aq_enq_rsp {
struct mbox_msghdr hdr;
union {
-struct nix_rq_ctx_s rq;
-struct nix_sq_ctx_s sq;
-struct nix_cq_ctx_s cq;
-struct nix_rsse_s rss;
-struct nix_rx_mce_s mce;
+__otx2_io struct nix_rq_ctx_s rq;
+__otx2_io struct nix_sq_ctx_s sq;
+__otx2_io struct nix_cq_ctx_s cq;
+__otx2_io struct nix_rsse_s rss;
+__otx2_io struct nix_rx_mce_s mce;
};
};

@@ -7,7 +7,7 @@
#define npa_dump(fmt, ...) fprintf(stderr, fmt "\n", ##__VA_ARGS__)
static inline void
-npa_lf_pool_dump(struct npa_pool_s *pool)
+npa_lf_pool_dump(__otx2_io struct npa_pool_s *pool)
{
npa_dump("W0: Stack base\t\t0x%"PRIx64"", pool->stack_base);
npa_dump("W1: ena \t\t%d\nW1: nat_align \t\t%d\nW1: stack_caching \t%d",
@@ -45,7 +45,7 @@ npa_lf_pool_dump(struct npa_pool_s *pool)
}
static inline void
-npa_lf_aura_dump(struct npa_aura_s *aura)
+npa_lf_aura_dump(__otx2_io struct npa_aura_s *aura)
{
npa_dump("W0: Pool addr\t\t0x%"PRIx64"\n", aura->pool_addr);

@@ -355,14 +355,14 @@ npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
aura_init_req->aura_id = aura_id;
aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
aura_init_req->op = NPA_AQ_INSTOP_INIT;
-memcpy(&aura_init_req->aura, aura, sizeof(*aura));
+otx2_mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));
pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
pool_init_req->aura_id = aura_id;
pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
pool_init_req->op = NPA_AQ_INSTOP_INIT;
-memcpy(&pool_init_req->pool, pool, sizeof(*pool));
+otx2_mbox_memcpy(&pool_init_req->pool, pool, sizeof(*pool));
otx2_mbox_msg_send(mbox, 0);
rc = otx2_mbox_wait_for_rsp(mbox, 0);
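
With the request structures now volatile-qualified, passing them to
plain memcpy() would discard the qualifier and leave the copy free to
use wide, potentially unaligned accesses, hence the switch to
otx2_mbox_memcpy() above. As an illustration only (the helper name
below is hypothetical and the in-tree implementation may differ), such
a copy can be done byte by byte through volatile pointers:

#include <stddef.h>
#include <stdint.h>

/* Illustrative volatile-safe copy: every byte moves through a volatile
 * pointer, so the compiler neither drops the qualifier nor merges the
 * accesses into wider ones.
 */
static inline void
hyp_mbox_memcpy(volatile void *d, const volatile void *s, size_t l)
{
	const volatile uint8_t *sb = (const volatile uint8_t *)s;
	volatile uint8_t *db = (volatile uint8_t *)d;
	size_t i;

	for (i = 0; i < l; i++)
		db[i] = sb[i];
}
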
@@ -605,9 +605,9 @@ npa_lf_aura_range_update_check(uint64_t aura_handle)
uint64_t aura_id = npa_lf_aura_handle_to_aura(aura_handle);
struct otx2_npa_lf *lf = otx2_npa_lf_obj_get();
struct npa_aura_lim *lim = lf->aura_lim;
+__otx2_io struct npa_pool_s *pool;
struct npa_aq_enq_req *req;
struct npa_aq_enq_rsp *rsp;
-struct npa_pool_s *pool;
int rc;
req = otx2_mbox_alloc_msg_npa_aq_enq(lf->mbox);

@@ -235,7 +235,7 @@ otx2_nix_dev_get_reg(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
}
static inline void
-nix_lf_sq_dump(struct nix_sq_ctx_s *ctx)
+nix_lf_sq_dump(__otx2_io struct nix_sq_ctx_s *ctx)
{
nix_dump("W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d",
ctx->sqe_way_mask, ctx->cq);
@@ -295,7 +295,7 @@ nix_lf_sq_dump(struct nix_sq_ctx_s *ctx)
}
static inline void
-nix_lf_rq_dump(struct nix_rq_ctx_s *ctx)
+nix_lf_rq_dump(__otx2_io struct nix_rq_ctx_s *ctx)
{
nix_dump("W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x",
ctx->wqe_aura, ctx->substream);
@@ -355,7 +355,7 @@ nix_lf_rq_dump(struct nix_rq_ctx_s *ctx)
}
static inline void
-nix_lf_cq_dump(struct nix_cq_ctx_s *ctx)
+nix_lf_cq_dump(__otx2_io struct nix_cq_ctx_s *ctx)
{
nix_dump("W0: base \t\t\t0x%" PRIx64 "\n", ctx->base);