common/qat: add scatter-gather header
This patch refactors the SGL struct so it includes a flexible array of flat buffers, since the sym and compress PMDs can need different-sized SGLs.

Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
commit 944027acd4
parent 9d6d5b4d47
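Before the hunks, a condensed sketch of the pattern this patch introduces may help: a shared SGL header macro, a generic flexible-array view used by the common code, and per-service structs that pick their own segment count. The names below are taken from the diff that follows; the snippet is illustrative only (it assumes rte_common.h for __rte_packed/__rte_cache_aligned) and is not itself part of the patch.

#include <stdint.h>
#include <rte_common.h>		/* __rte_packed, __rte_cache_aligned */

/* Flat buffer element, as declared in qat_common.h. */
struct qat_flat_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __rte_packed;

/* Header fields shared by every service-specific SGL. */
#define qat_sgl_hdr  struct { \
	uint64_t resrvd; \
	uint32_t num_bufs; \
	uint32_t num_mapped_bufs; \
}

/* Generic view used by the common code: header + flexible array. */
__extension__
struct qat_sgl {
	qat_sgl_hdr;
	struct qat_flat_buf buffers[0];
} __rte_packed __rte_cache_aligned;

/* A service-specific SGL: same header, concrete array size. */
#define QAT_SYM_SGL_MAX_NUMBER 16
struct qat_sym_sgl {
	qat_sgl_hdr;
	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
} __rte_packed __rte_cache_aligned;

/* The common fill routine now takes a void * plus the caller's segment
 * limit, so one implementation serves every layout through the
 * struct qat_sgl view:
 *
 *   ret = qat_sgl_fill_array(mbuf, buf_start, &cookie->qat_sgl_src,
 *                            data_len, QAT_SYM_SGL_MAX_NUMBER);
 */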
drivers/common/qat/qat_common.c
@@ -8,40 +8,53 @@
 int
 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
-		struct qat_sgl *list, uint32_t data_len)
+		void *list_in, uint32_t data_len,
+		const uint16_t max_segs)
 {
 	int nr = 1;
-	uint32_t buf_len = rte_pktmbuf_iova(buf) -
-			buf_start + rte_pktmbuf_data_len(buf);
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	/* buf_start allows the first buffer to start at an address before or
+	 * after the mbuf data start. It's used to either optimally align the
+	 * dma to 64 or to start dma from an offset.
+	 */
+	uint32_t buf_len;
+	uint32_t first_buf_len = rte_pktmbuf_data_len(buf) +
+			(rte_pktmbuf_mtophys(buf) - buf_start);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	uint8_t *virt_addr[max_segs];
+	virt_addr[0] = rte_pktmbuf_mtod(buf, uint8_t*) +
+			(rte_pktmbuf_mtophys(buf) - buf_start);
+#endif
 
 	list->buffers[0].addr = buf_start;
 	list->buffers[0].resrvd = 0;
-	list->buffers[0].len = buf_len;
+	list->buffers[0].len = first_buf_len;
 
-	if (data_len <= buf_len) {
+	if (data_len <= first_buf_len) {
 		list->num_bufs = nr;
 		list->buffers[0].len = data_len;
-		return 0;
+		goto sgl_end;
 	}
 
 	buf = buf->next;
+	buf_len = first_buf_len;
 	while (buf) {
-		if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
-			QAT_LOG(ERR,
-				"QAT PMD exceeded size of QAT SGL entry(%u)",
-					QAT_SGL_MAX_NUMBER);
+		if (unlikely(nr == max_segs)) {
+			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+				max_segs);
 			return -EINVAL;
 		}
 
 		list->buffers[nr].len = rte_pktmbuf_data_len(buf);
 		list->buffers[nr].resrvd = 0;
-		list->buffers[nr].addr = rte_pktmbuf_iova(buf);
+		list->buffers[nr].addr = rte_pktmbuf_mtophys(buf);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		virt_addr[nr] = rte_pktmbuf_mtod(buf, uint8_t*);
+#endif
 
 		buf_len += list->buffers[nr].len;
 		buf = buf->next;
 
-		if (buf_len > data_len) {
+		if (buf_len >= data_len) {
 			list->buffers[nr].len -=
 				buf_len - data_len;
 			buf = NULL;
@@ -50,6 +63,22 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
 		}
 	}
 	list->num_bufs = nr;
 
+sgl_end:
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	{
+		uint16_t i;
+		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+		for (i = 0; i < list->num_bufs; i++) {
+			QAT_DP_LOG(INFO,
+				"QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+				i, list->buffers[i].len,
+				list->buffers[i].addr);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL",
+					virt_addr[i], list->buffers[i].len);
+		}
+	}
+#endif
+
 	return 0;
 }
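The buf_start comment added above is the crux of the first hunk: the first flat buffer may begin before the mbuf data so that the DMA start can be cache-line aligned, and first_buf_len absorbs the slack. A hypothetical caller-side sketch (the helper and its parameter names are placeholders; QAT_64_BTYE_ALIGN_MASK and qat_sgl_fill_array() come from this patch's headers):

#include <rte_mbuf.h>
#include "qat_common.h"	/* QAT_64_BTYE_ALIGN_MASK, qat_sgl_fill_array() */

/* Hypothetical helper: build an SGL whose DMA start is aligned down to
 * the 64-byte boundary at or before the mbuf data start.
 */
static int
fill_aligned_sgl(struct rte_mbuf *m, void *sgl, uint16_t max_segs,
		uint32_t data_len)
{
	/* Align down; the first flat buffer then starts slightly before the
	 * data and its length grows by the same amount (first_buf_len).
	 */
	uint64_t buf_start = rte_pktmbuf_iova(m) & QAT_64_BTYE_ALIGN_MASK;

	return qat_sgl_fill_array(m, buf_start, sgl, data_len, max_segs);
}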
drivers/common/qat/qat_common.h
@@ -10,11 +10,6 @@
 
 /**< Intel(R) QAT device name for PCI registration */
 #define QAT_PCI_NAME qat
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SGL_MAX_NUMBER 16
-
 #define QAT_64_BTYE_ALIGN_MASK (~0x3f)
 
 /* Intel(R) QuickAssist Technology device generation is enumerated
@@ -31,6 +26,7 @@ enum qat_service_type {
 	QAT_SERVICE_COMPRESSION,
 	QAT_SERVICE_INVALID
 };
+
 #define QAT_MAX_SERVICES	(QAT_SERVICE_INVALID)
 
 /**< Common struct for scatter-gather list operations */
@@ -40,11 +36,17 @@ struct qat_flat_buf {
 	uint64_t addr;
 } __rte_packed;
 
+#define qat_sgl_hdr  struct { \
+	uint64_t resrvd; \
+	uint32_t num_bufs; \
+	uint32_t num_mapped_bufs; \
+}
+
+__extension__
 struct qat_sgl {
-	uint64_t resrvd;
-	uint32_t num_bufs;
-	uint32_t num_mapped_bufs;
-	struct qat_flat_buf buffers[QAT_SGL_MAX_NUMBER];
+	qat_sgl_hdr;
+	/* flexible array of flat buffers*/
+	struct qat_flat_buf buffers[0];
 } __rte_packed __rte_cache_aligned;
 
 /** Common, i.e. not service-specific, statistics */
@@ -64,7 +66,8 @@ struct qat_pci_device;
 
 int
 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
-		struct qat_sgl *list, uint32_t data_len);
+		void *list_in, uint32_t data_len,
+		const uint16_t max_segs);
 void
 qat_stats_get(struct qat_pci_device *dev,
 		struct qat_common_stats *stats,
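The header change above is what the commit message is about: with the sized array gone from struct qat_sgl, each PMD declares its own SGL with whatever segment count it needs. The sym PMD's variant appears in the qat_sym.h hunk further down; a compress-side equivalent would presumably follow the same shape (the name and the value 32 below are hypothetical, not taken from this patch):

/* Hypothetical compress-PMD SGL; the name and segment count are
 * placeholders, not part of this patch.
 */
#define QAT_COMP_SGL_MAX_NUMBER	32

struct qat_comp_sgl {
	qat_sgl_hdr;
	struct qat_flat_buf buffers[QAT_COMP_SGL_MAX_NUMBER];
} __rte_packed __rte_cache_aligned;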
drivers/crypto/qat/qat_sym.c
@@ -495,8 +495,9 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
 				QAT_COMN_PTR_TYPE_SGL);
 		ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
 				&cookie->qat_sgl_src,
-				qat_req->comn_mid.src_length);
+				qat_req->comn_mid.src_length,
+				QAT_SYM_SGL_MAX_NUMBER);
 
 		if (unlikely(ret)) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
@@ -509,9 +510,10 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				cookie->qat_sgl_src_phys_addr;
 		else {
 			ret = qat_sgl_fill_array(op->sym->m_dst,
 					dst_buf_start,
 					&cookie->qat_sgl_dst,
-					qat_req->comn_mid.dst_length);
+					qat_req->comn_mid.dst_length,
+					QAT_SYM_SGL_MAX_NUMBER);
 
 			if (unlikely(ret)) {
 				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
drivers/crypto/qat/qat_sym.h
@@ -21,11 +21,21 @@
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER	16
+
 struct qat_sym_session;
 
+struct qat_sym_sgl {
+	qat_sgl_hdr;
+	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
 struct qat_sym_op_cookie {
-	struct qat_sgl qat_sgl_src;
-	struct qat_sgl qat_sgl_dst;
+	struct qat_sym_sgl qat_sgl_src;
+	struct qat_sym_sgl qat_sgl_dst;
 	phys_addr_t qat_sgl_src_phys_addr;
 	phys_addr_t qat_sgl_dst_phys_addr;
 };
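One property the refactor relies on, worth spelling out: qat_sgl_fill_array() casts whatever per-service SGL it is handed to struct qat_sgl *, which is only valid because every such struct begins with qat_sgl_hdr followed immediately by its flat-buffer array. A hedged sketch of a compile-time check a PMD could add to document that assumption (not part of this patch):

#include <stddef.h>
#include "qat_common.h"	/* struct qat_sgl */
#include "qat_sym.h"	/* struct qat_sym_sgl */

/* Both layouts must place buffers[] at the same offset for the
 * struct qat_sgl view used inside qat_sgl_fill_array() to hold.
 */
_Static_assert(offsetof(struct qat_sym_sgl, buffers) ==
		offsetof(struct qat_sgl, buffers),
		"service SGL must begin with qat_sgl_hdr");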