From 1947bd18580bfd2eff1f72913fe140df9cda60fd Mon Sep 17 00:00:00 2001
From: Fiona Trahe
Date: Mon, 23 Jul 2018 14:06:51 +0100
Subject: [PATCH] compress/qat: support scatter-gather buffers

This patch adds the Scatter-Gather List (SGL) feature to the QAT
compression PMD.

Signed-off-by: Tomasz Jozwiak
Signed-off-by: Fiona Trahe
---
 config/common_base                       |  1 +
 config/rte_config.h                      |  1 +
 doc/guides/compressdevs/features/qat.ini |  3 ++
 doc/guides/compressdevs/qat_comp.rst     |  2 --
 drivers/compress/qat/qat_comp.c          | 41 +++++++++++++++++++++---
 drivers/compress/qat/qat_comp.h          |  9 ++++++
 drivers/compress/qat/qat_comp_pmd.c      | 25 ++++++++++++++-
 7 files changed, 75 insertions(+), 7 deletions(-)

diff --git a/config/common_base b/config/common_base
index a061c2108d..6d82b91c39 100644
--- a/config/common_base
+++ b/config/common_base
@@ -499,6 +499,7 @@ CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
 # Max. number of QuickAssist devices, which can be detected and attached
 #
 CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
+CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
 
 #
 # Compile PMD for virtio crypto devices
diff --git a/config/rte_config.h b/config/rte_config.h
index 28f04b41da..a8e4797749 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -89,6 +89,7 @@
 /* QuickAssist device */
 /* Max. number of QuickAssist devices which can be attached */
 #define RTE_PMD_QAT_MAX_PCI_DEVICES 48
+#define RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS 16
 
 /* virtio crypto defines */
 #define RTE_MAX_VIRTIO_CRYPTO 32
diff --git a/doc/guides/compressdevs/features/qat.ini b/doc/guides/compressdevs/features/qat.ini
index 12bfb21d3c..5cd4524ba9 100644
--- a/doc/guides/compressdevs/features/qat.ini
+++ b/doc/guides/compressdevs/features/qat.ini
@@ -5,6 +5,9 @@
 ;
 [Features]
 HW Accelerated = Y
+OOP SGL In SGL Out = Y
+OOP SGL In LB Out  = Y
+OOP LB In SGL Out  = Y
 Deflate = Y
 Adler32 = Y
 Crc32 = Y
diff --git a/doc/guides/compressdevs/qat_comp.rst b/doc/guides/compressdevs/qat_comp.rst
index 167f816b70..8b1270b708 100644
--- a/doc/guides/compressdevs/qat_comp.rst
+++ b/doc/guides/compressdevs/qat_comp.rst
@@ -35,8 +35,6 @@ Checksum generation:
 Limitations
 -----------
 
-* Chained mbufs are not yet supported, therefore max data size which can be passed to the PMD in a single mbuf is 64K - 1. If data is larger than this it will need to be split up and sent as multiple operations.
-
 * Compressdev level 0, no compression, is not supported.
 
 * Dynamic Huffman encoding is not yet supported.
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index e8019ebc83..cbf7614e9a 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -21,10 +21,12 @@
 
 int
 qat_comp_build_request(void *in_op, uint8_t *out_msg,
-		       void *op_cookie __rte_unused,
+		       void *op_cookie,
 		       enum qat_device_gen qat_dev_gen __rte_unused)
 {
 	struct rte_comp_op *op = in_op;
+	struct qat_comp_op_cookie *cookie =
+			(struct qat_comp_op_cookie *)op_cookie;
 	struct qat_comp_xform *qat_xform = op->private_xform;
 	const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
 	struct icp_qat_fw_comp_req *comp_req =
@@ -44,12 +46,43 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 	comp_req->comp_pars.comp_len = op->src.length;
 	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst);
 
-	/* sgl */
 	if (op->m_src->next != NULL || op->m_dst->next != NULL) {
-		QAT_DP_LOG(ERR, "QAT PMD doesn't support scatter gather");
-		return -EINVAL;
+		/* sgl */
+		int ret = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+				QAT_COMN_PTR_TYPE_SGL);
+		ret = qat_sgl_fill_array(op->m_src,
+				rte_pktmbuf_mtophys_offset(op->m_src,
+						op->src.offset),
+				&cookie->qat_sgl_src,
+				op->src.length,
+				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+		if (ret) {
+			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+			return ret;
+		}
+
+		ret = qat_sgl_fill_array(op->m_dst,
+				rte_pktmbuf_mtophys_offset(op->m_dst,
+						op->dst.offset),
+				&cookie->qat_sgl_dst,
+				comp_req->comp_pars.out_buffer_sz,
+				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+		if (ret) {
+			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+			return ret;
+		}
+
+		comp_req->comn_mid.src_data_addr =
+				cookie->qat_sgl_src_phys_addr;
+		comp_req->comn_mid.dest_data_addr =
+				cookie->qat_sgl_dst_phys_addr;
+		comp_req->comn_mid.src_length = 0;
+		comp_req->comn_mid.dst_length = 0;
 	} else {
+		/* flat aka linear buffer */
 		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
 				QAT_COMN_PTR_TYPE_FLAT);
 		comp_req->comn_mid.src_length = rte_pktmbuf_data_len(op->m_src);
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index 9e6861b8a3..8d315efb03 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -24,7 +24,16 @@ enum qat_comp_request_type {
 	REQ_COMP_END
 };
 
+struct qat_comp_sgl {
+	qat_sgl_hdr;
+	struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
+} __rte_packed __rte_cache_aligned;
+
 struct qat_comp_op_cookie {
+	struct qat_comp_sgl qat_sgl_src;
+	struct qat_comp_sgl qat_sgl_dst;
+	phys_addr_t qat_sgl_src_phys_addr;
+	phys_addr_t qat_sgl_dst_phys_addr;
 };
 
 struct qat_comp_xform {
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 764c053c2a..b89975fcd9 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -13,7 +13,10 @@ static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
 				RTE_COMP_FF_ADLER32_CHECKSUM |
 				RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
 				RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
-				RTE_COMP_FF_HUFFMAN_FIXED,
+				RTE_COMP_FF_HUFFMAN_FIXED |
+				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+				RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+				RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
 		.window_size = {.min = 15, .max = 15, .increment = 0} },
 	{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
 
@@ -71,7 +74,9 @@ static int
 qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 		  uint32_t max_inflight_ops, int socket_id)
 {
+	struct qat_qp *qp;
 	int ret = 0;
+	uint32_t i;
 	struct qat_qp_config qat_qp_conf;
 
 	struct qat_qp **qp_addr =
@@ -109,6 +114,24 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 
 	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id] =
 		*qp_addr;
 
+	qp = (struct qat_qp *)*qp_addr;
+
+	for (i = 0; i < qp->nb_descriptors; i++) {
+
+		struct qat_comp_op_cookie *cookie =
+				qp->op_cookies[i];
+
+		cookie->qat_sgl_src_phys_addr =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_comp_op_cookie,
+				qat_sgl_src);
+
+		cookie->qat_sgl_dst_phys_addr =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_comp_op_cookie,
+				qat_sgl_dst);
+	}
+
 	return ret;
 }
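
For reference, below is a minimal usage sketch (not part of the patch) of how an
application could exercise the new SGL path through the compressdev API once the
PMD advertises the OOP SGL feature flags. It assumes a device and queue pair 0
already configured, and an op mempool and private xform created elsewhere; the
helper name and the op_pool, priv_xform, src_head, src_tail and dst parameters
are all illustrative.

#include <errno.h>

#include <rte_comp.h>
#include <rte_compressdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Illustrative helper: enqueue one stateless DEFLATE op on chained mbufs. */
static int
enqueue_sgl_compress(uint8_t dev_id, struct rte_mempool *op_pool,
		     struct rte_mbuf *src_head, struct rte_mbuf *src_tail,
		     struct rte_mbuf *dst, void *priv_xform, uint32_t src_len)
{
	const struct rte_compressdev_capabilities *cap =
		rte_compressdev_capability_get(dev_id, RTE_COMP_ALGO_DEFLATE);
	struct rte_comp_op *op;

	/* Only submit chained mbufs if the PMD advertises SGL support. */
	if (cap == NULL ||
	    !(cap->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT))
		return -ENOTSUP;

	/* Chain the source segments; the PMD sees them via m_src->next. */
	if (rte_pktmbuf_chain(src_head, src_tail) != 0)
		return -E2BIG;

	op = rte_comp_op_alloc(op_pool);
	if (op == NULL)
		return -ENOMEM;

	op->op_type = RTE_COMP_OP_STATELESS;
	op->flush_flag = RTE_COMP_FLUSH_FINAL;
	op->private_xform = priv_xform;
	op->m_src = src_head;	/* multi-segment source */
	op->m_dst = dst;	/* may be linear or chained */
	op->src.offset = 0;
	op->src.length = src_len;
	op->dst.offset = 0;

	if (rte_compressdev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
		rte_comp_op_free(op);
		return -EAGAIN;
	}
	return 0;
}

On enqueue the PMD walks both mbuf chains with qat_sgl_fill_array() into the
per-op cookies initialised above; chains with more than
RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS segments (16 by default) are rejected.
Completed operations are collected with rte_compressdev_dequeue_burst() as
usual.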