compress/qat: support scatter-gather buffers

This patch adds the Scatter-Gather List (SGL) feature to the
QAT compression PMD, so that chained (multi-segment) mbufs
can be used as source and destination buffers.

Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
commit 1947bd1858 (parent 944027acd4)
Author: Fiona Trahe <fiona.trahe@intel.com>
Date:   2018-07-23 14:06:51 +01:00
Committed by: Pablo de Lara

7 changed files with 75 additions and 7 deletions

--- a/config/common_base
+++ b/config/common_base

@@ -499,6 +499,7 @@ CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
 # Max. number of QuickAssist devices, which can be detected and attached
 #
 CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
+CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
 #
 # Compile PMD for virtio crypto devices

--- a/config/rte_config.h
+++ b/config/rte_config.h

@@ -89,6 +89,7 @@
 /* QuickAssist device */
 /* Max. number of QuickAssist devices which can be attached */
 #define RTE_PMD_QAT_MAX_PCI_DEVICES 48
+#define RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS 16
 /* virtio crypto defines */
 #define RTE_MAX_VIRTIO_CRYPTO 32
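
Both build systems now expose the same compile-time limit: each op cookie carries a fixed-size SGL with room for 16 flat buffers, so an mbuf chain with more segments than that cannot be described to the hardware and is rejected at request-build time. As a minimal application-side sketch (the helper name is ours, not part of the patch), a chain can be pre-checked against the limit:

#include <rte_mbuf.h>

/* Hypothetical helper, not part of this patch: verify that a chained
 * mbuf fits the PMD's fixed-size SGL descriptor before enqueueing. */
static inline int
qat_comp_mbuf_fits_sgl(const struct rte_mbuf *m)
{
	/* nb_segs counts every segment in the chain, head included */
	return m->nb_segs <= RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS;
}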

--- a/doc/guides/compressdevs/features/qat.ini
+++ b/doc/guides/compressdevs/features/qat.ini

@@ -5,6 +5,9 @@
 ;
 [Features]
 HW Accelerated = Y
+OOP SGL In SGL Out = Y
+OOP SGL In LB Out = Y
+OOP LB In SGL Out = Y
 Deflate = Y
 Adler32 = Y
 Crc32 = Y

--- a/doc/guides/compressdevs/qat_comp.rst
+++ b/doc/guides/compressdevs/qat_comp.rst

@@ -35,8 +35,6 @@ Checksum generation:
 Limitations
 -----------
-* Chained mbufs are not yet supported, therefore max data size which can be passed to the PMD in a single mbuf is 64K - 1. If data is larger than this it will need to be split up and sent as multiple operations.
 * Compressdev level 0, no compression, is not supported.
 * Dynamic Huffman encoding is not yet supported.

--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c

@@ -21,10 +21,12 @@
 int
 qat_comp_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie __rte_unused,
+		void *op_cookie,
 		enum qat_device_gen qat_dev_gen __rte_unused)
 {
 	struct rte_comp_op *op = in_op;
+	struct qat_comp_op_cookie *cookie =
+			(struct qat_comp_op_cookie *)op_cookie;
 	struct qat_comp_xform *qat_xform = op->private_xform;
 	const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
 	struct icp_qat_fw_comp_req *comp_req =
@@ -44,12 +46,43 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 	comp_req->comp_pars.comp_len = op->src.length;
 	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst);
-	/* sgl */
 	if (op->m_src->next != NULL || op->m_dst->next != NULL) {
-		QAT_DP_LOG(ERR, "QAT PMD doesn't support scatter gather");
-		return -EINVAL;
+		/* sgl */
+		int ret = 0;
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+				QAT_COMN_PTR_TYPE_SGL);
+		ret = qat_sgl_fill_array(op->m_src,
+				rte_pktmbuf_mtophys_offset(op->m_src,
+						op->src.offset),
+				&cookie->qat_sgl_src,
+				op->src.length,
+				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+		if (ret) {
+			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+			return ret;
+		}
+		ret = qat_sgl_fill_array(op->m_dst,
+				rte_pktmbuf_mtophys_offset(op->m_dst,
+						op->dst.offset),
+				&cookie->qat_sgl_dst,
+				comp_req->comp_pars.out_buffer_sz,
+				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+		if (ret) {
+			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+			return ret;
+		}
+		comp_req->comn_mid.src_data_addr =
+				cookie->qat_sgl_src_phys_addr;
+		comp_req->comn_mid.dest_data_addr =
+				cookie->qat_sgl_dst_phys_addr;
+		comp_req->comn_mid.src_length = 0;
+		comp_req->comn_mid.dst_length = 0;
+	} else {
+		/* flat aka linear buffer */
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+				QAT_COMN_PTR_TYPE_FLAT);
+		comp_req->comn_mid.src_length = rte_pktmbuf_data_len(op->m_src);
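
qat_sgl_fill_array() comes from the shared QAT driver code: it walks the mbuf chain and records each segment's IOVA and length into the cookie's flat-buffer array, failing if the chain exceeds the segment limit or ends before covering the requested length. The sketch below (function name ours) is an illustration of that walk, not the actual shared-code implementation; in particular it glosses over the first-segment length adjustment implied by the rte_pktmbuf_mtophys_offset() start address.

#include <errno.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include "qat_comp.h"	/* struct qat_comp_sgl from this patch */

/* Illustrative sketch only: fill an SGL from an mbuf chain. */
static int
sgl_fill_sketch(struct rte_mbuf *m, uint64_t first_iova,
		struct qat_comp_sgl *list, uint32_t data_len, int max_segs)
{
	uint32_t remaining = data_len;
	int i;

	for (i = 0; m != NULL && remaining > 0; m = m->next, i++) {
		uint32_t len = RTE_MIN(rte_pktmbuf_data_len(m), remaining);

		if (i == max_segs)
			return -EINVAL;	/* chain longer than descriptor */
		list->buffers[i].addr = (i == 0) ?
				first_iova : rte_pktmbuf_mtophys(m);
		list->buffers[i].len = len;
		list->buffers[i].resrvd = 0;
		remaining -= len;
	}
	list->num_bufs = i;
	return remaining != 0 ? -EINVAL : 0; /* chain ended too early */
}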

--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h

@@ -24,7 +24,16 @@ enum qat_comp_request_type {
 	REQ_COMP_END
 };
+struct qat_comp_sgl {
+	qat_sgl_hdr;
+	struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
+} __rte_packed __rte_cache_aligned;
 struct qat_comp_op_cookie {
+	struct qat_comp_sgl qat_sgl_src;
+	struct qat_comp_sgl qat_sgl_dst;
+	phys_addr_t qat_sgl_src_phys_addr;
+	phys_addr_t qat_sgl_dst_phys_addr;
 };
 struct qat_comp_xform {
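
`qat_sgl_hdr;` is not a typo: it is a macro from the shared qat_common.h that expands to an anonymous struct holding the list header the firmware expects. For context, the relevant shared definitions look approximately like the following (quoted from the driver tree of this era; treat the exact layout as illustrative):

/* Approximate definitions from drivers/common/qat/qat_common.h; the
 * anonymous-struct expansion is why `qat_sgl_hdr;` above is valid. */
#define qat_sgl_hdr  struct { \
	uint64_t resrvd; \
	uint32_t num_bufs; \
	uint32_t num_mapped_bufs; \
}

struct qat_flat_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __rte_packed;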

--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c

@@ -13,7 +13,10 @@ static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
 				RTE_COMP_FF_ADLER32_CHECKSUM |
 				RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
 				RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
-				RTE_COMP_FF_HUFFMAN_FIXED,
+				RTE_COMP_FF_HUFFMAN_FIXED |
+				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+				RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+				RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
 	.window_size = {.min = 15, .max = 15, .increment = 0} },
 {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
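
The three new flags spell out which out-of-place (OOP) combinations the PMD handles: scatter-gather list (SGL) or linear buffer (LB) on either side of the operation. An application can gate its use of chained mbufs on the standard compressdev capability query; a minimal sketch:

#include <rte_compressdev.h>

/* Minimal sketch: check for the SGL-in/SGL-out path before enqueuing
 * chained mbufs on both sides of the operation. */
static int
device_supports_sgl(uint8_t dev_id)
{
	const struct rte_compressdev_capabilities *cap =
		rte_compressdev_capability_get(dev_id, RTE_COMP_ALGO_DEFLATE);

	return cap != NULL &&
		(cap->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT);
}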
@@ -71,7 +74,9 @@ static int
 qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 		uint32_t max_inflight_ops, int socket_id)
 {
+	struct qat_qp *qp;
 	int ret = 0;
+	uint32_t i;
 	struct qat_qp_config qat_qp_conf;
 	struct qat_qp **qp_addr =
@@ -109,6 +114,24 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
 						= *qp_addr;
+	qp = (struct qat_qp *)*qp_addr;
+	for (i = 0; i < qp->nb_descriptors; i++) {
+		struct qat_comp_op_cookie *cookie =
+				qp->op_cookies[i];
+		cookie->qat_sgl_src_phys_addr =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_comp_op_cookie,
+				qat_sgl_src);
+		cookie->qat_sgl_dst_phys_addr =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_comp_op_cookie,
+				qat_sgl_dst);
+	}
 	return ret;
 }
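
Precomputing the two SGL IOVAs once per cookie at queue-pair setup keeps the rte_mempool_virt2iova() plus offsetof() arithmetic out of the data path; qat_comp_build_request() above only copies the stored addresses into the firmware descriptor. Put together, the feature lets an application hand a multi-segment mbuf straight to the PMD. The following hypothetical usage sketch (mbuf_pool, op_pool and priv_xform are assumed to be set up elsewhere; error handling omitted) exercises the new "OOP SGL In LB Out" path:

#include <rte_comp.h>
#include <rte_compressdev.h>
#include <rte_mbuf.h>

/* Hypothetical usage sketch: compress a two-segment source mbuf into a
 * linear destination mbuf. Device, queue pair, pools and the private
 * xform are assumed to be configured already. */
static void
compress_chained_example(struct rte_mempool *mbuf_pool,
		struct rte_mempool *op_pool, void *priv_xform)
{
	struct rte_mbuf *src_head = rte_pktmbuf_alloc(mbuf_pool);
	struct rte_mbuf *src_tail = rte_pktmbuf_alloc(mbuf_pool);
	struct rte_mbuf *dst = rte_pktmbuf_alloc(mbuf_pool);
	struct rte_comp_op *op = rte_comp_op_alloc(op_pool);

	/* ... append payload to both source segments ... */
	rte_pktmbuf_chain(src_head, src_tail);	/* src is now 2 segments */

	op->op_type = RTE_COMP_OP_STATELESS;
	op->flush_flag = RTE_COMP_FLUSH_FINAL;
	op->private_xform = priv_xform;
	op->m_src = src_head;	/* chained: takes the new SGL path */
	op->m_dst = dst;	/* linear; PMD builds a 1-entry SGL for it */
	op->src.offset = 0;
	op->src.length = rte_pktmbuf_pkt_len(src_head);
	op->dst.offset = 0;

	/* dev_id 0, qp_id 0 assumed; poll completion via dequeue_burst() */
	rte_compressdev_enqueue_burst(0, 0, &op, 1);
}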