net/liquidio: add API to send packet to device

Add API to send control and data packets to the device. A per-queue
request list keeps track of host buffers to be freed once the
corresponding instructions reach the device.
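
Example usage of the new path (a sketch only: LIO_OPCODE_EXAMPLE and
LIO_SUBCODE_EXAMPLE are hypothetical placeholders, not values defined by
this patch, and error handling is trimmed):

    /* Assumes the sc buffer pool was set up via lio_setup_sc_buffer_pool()
     * and instruction queue 0 is configured.
     */
    static int
    lio_send_example_ctl(struct lio_device *lio_dev)
    {
            struct lio_soft_command *sc;

            /* no input data; reserve 16B for the device's response */
            sc = lio_alloc_soft_command(lio_dev, 0, 16, 0);
            if (sc == NULL)
                    return -1;

            sc->iq_no = 0;
            lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE_EXAMPLE,
                                     LIO_SUBCODE_EXAMPLE, 0, 0, 0);

            if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED) {
                    lio_free_soft_command(sc);
                    return -1;
            }

            /* on success, sc is freed through the IQ request list once
             * the device fetches the command
             */
            return 0;
    }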

Signed-off-by: Shijith Thotton <shijith.thotton@caviumnetworks.com>
Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Derek Chickles <derek.chickles@caviumnetworks.com>
Signed-off-by: Venkat Koppula <venkat.koppula@caviumnetworks.com>
Signed-off-by: Srisivasubramanian S <ssrinivasan@caviumnetworks.com>
Signed-off-by: Mallesham Jatharakonda <mjatharakonda@oneconvergence.com>


@@ -76,6 +76,8 @@ enum lio_card_type {
#define LIO_23XX_NAME "23xx"

#define LIO_DEV_RUNNING 0xc

#define LIO_NUM_DEF_TX_DESCS_CFG(cfg) \
        ((cfg)->default_config->num_def_tx_descs)
@@ -93,6 +95,15 @@ enum lio_card_type {
#define LIO_BASE_MINOR_VERSION 5
#define LIO_BASE_MICRO_VERSION 1

/** Tag types used by Octeon cores in their work. */
enum octeon_tag_type {
        OCTEON_ORDERED_TAG = 0,
        OCTEON_ATOMIC_TAG = 1,
};

/* pre-defined host->NIC tag values */
#define LIO_CONTROL (0x11111110)

/* Routines for reading and writing CSRs */
#ifdef RTE_LIBRTE_LIO_DEBUG_REGS
#define lio_write_csr(lio_dev, reg_off, value) \


@@ -207,6 +207,186 @@ lio_free_instr_queue0(struct lio_device *lio_dev)
        lio_dev->num_iqs--;
}
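
/* Ring the IQ doorbell to tell the device how many commands were filled
 * since the last ring.
 */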
static inline void
lio_ring_doorbell(struct lio_device *lio_dev,
                  struct lio_instr_queue *iq)
{
        if (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) {
                rte_write32(iq->fill_cnt, iq->doorbell_reg);
                /* make sure doorbell write goes through */
                rte_wmb();
                iq->fill_cnt = 0;
        }
}
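
/* Copy a 32B or 64B command into the next free slot of the IQ ring. */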
static inline void
copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
{
        uint8_t *iqptr, cmdsize;

        cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
        iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

        rte_memcpy(iqptr, cmd, cmdsize);
}
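
/* Reserve the next slot and copy the command into the ring; callers
 * serialize via the queue's post_lock and ring the doorbell themselves.
 */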
static inline struct lio_iq_post_status
post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
{
        struct lio_iq_post_status st;

        st.status = LIO_IQ_SEND_OK;

        /* This ensures that the read index does not wrap around to the same
         * position if queue gets full before Octeon could fetch any instr.
         */
        if (rte_atomic64_read(&iq->instr_pending) >=
                        (int32_t)(iq->max_count - 1)) {
                st.status = LIO_IQ_SEND_FAILED;
                st.index = -1;
                return st;
        }

        if (rte_atomic64_read(&iq->instr_pending) >=
                        (int32_t)(iq->max_count - 2))
                st.status = LIO_IQ_SEND_STOP;

        copy_cmd_into_iq(iq, cmd);

        /* "index" is returned, host_write_index is modified. */
        st.index = iq->host_write_index;
        iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
                                              iq->max_count);
        iq->fill_cnt++;

        /* Flush the command into memory. We need to be sure the data is in
         * memory before indicating that the instruction is pending.
         */
        rte_wmb();

        rte_atomic64_inc(&iq->instr_pending);

        return st;
}
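
/* Remember the host buffer tied to this slot so it can be freed once the
 * device has fetched the instruction.
 */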
static inline void
lio_add_to_request_list(struct lio_instr_queue *iq,
                        int idx, void *buf, int reqtype)
{
        iq->request_list[idx].buf = buf;
        iq->request_list[idx].reqtype = reqtype;
}
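
/* Post a single command to the given IQ under post_lock, file the buffer
 * on the request list and ring the doorbell if the post succeeded.
 */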
static int
lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
                 void *buf, uint32_t datasize __rte_unused, uint32_t reqtype)
{
        struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
        struct lio_iq_post_status st;

        rte_spinlock_lock(&iq->post_lock);

        st = post_command2(iq, cmd);

        if (st.status != LIO_IQ_SEND_FAILED) {
                lio_add_to_request_list(iq, st.index, buf, reqtype);
                lio_ring_doorbell(lio_dev, iq);
        }

        rte_spinlock_unlock(&iq->post_lock);

        return st.status;
}
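
/* Fill in the 64B command words (IH3, PKI IH3, IRH and OSSP) of a soft
 * command; the response fields (RDP, rflag) are set only when a response
 * buffer was requested via sc->rdatasize.
 */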
void
lio_prepare_soft_command(struct lio_device *lio_dev,
                         struct lio_soft_command *sc, uint8_t opcode,
                         uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0,
                         uint64_t ossp1)
{
        struct octeon_instr_pki_ih3 *pki_ih3;
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_irh *irh;
        struct octeon_instr_rdp *rdp;

        RTE_ASSERT(opcode <= 15);
        RTE_ASSERT(subcode <= 127);

        ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
        ih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind;

        pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

        pki_ih3->w = 1;
        pki_ih3->raw = 1;
        pki_ih3->utag = 1;
        pki_ih3->uqpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
        pki_ih3->utt = 1;

        pki_ih3->tag = LIO_CONTROL;
        pki_ih3->tagtype = OCTEON_ATOMIC_TAG;
        pki_ih3->qpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg;

        pki_ih3->pm = 0x7;
        pki_ih3->sl = 8;

        if (sc->datasize)
                ih3->dlengsz = sc->datasize;

        irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;

        irh->opcode = opcode;
        irh->subcode = subcode;

        /* opcode/subcode specific parameters (ossp) */
        irh->ossp = irh_ossp;
        sc->cmd.cmd3.ossp[0] = ossp0;
        sc->cmd.cmd3.ossp[1] = ossp1;

        if (sc->rdatasize) {
                rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
                rdp->pcie_port = lio_dev->pcie_port;
                rdp->rlen = sc->rdatasize;
                irh->rflag = 1;
                /* PKI IH3 */
                ih3->fsz = OCTEON_SOFT_CMD_RESP_IH3;
        } else {
                irh->rflag = 0;
                /* PKI IH3 */
                ih3->fsz = OCTEON_PCI_CMD_O3;
        }
}
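
/* Attach the DMA data/response pointers to the command and post it on the
 * soft command's instruction queue.
 */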
int
lio_send_soft_command(struct lio_device *lio_dev,
                      struct lio_soft_command *sc)
{
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_irh *irh;
        uint32_t len = 0;

        ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
        if (ih3->dlengsz) {
                RTE_ASSERT(sc->dmadptr);
                sc->cmd.cmd3.dptr = sc->dmadptr;
        }

        irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
        if (irh->rflag) {
                RTE_ASSERT(sc->dmarptr);
                RTE_ASSERT(sc->status_word != NULL);
                *sc->status_word = LIO_COMPLETION_WORD_INIT;
                sc->cmd.cmd3.rptr = sc->dmarptr;
        }

        len = (uint32_t)ih3->dlengsz;

        if (sc->wait_time)
                sc->timeout = lio_uptime + sc->wait_time;

        return lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len,
                                LIO_REQTYPE_SOFT_COMMAND);
}

int
lio_setup_sc_buffer_pool(struct lio_device *lio_dev)
{


@@ -50,11 +50,53 @@
#define lio_uptime \
        (size_t)(rte_get_timer_cycles() / rte_get_timer_hz())

#define LIO_IQ_SEND_OK          0
#define LIO_IQ_SEND_STOP        1
#define LIO_IQ_SEND_FAILED      -1

/* conditions */
#define LIO_REQTYPE_NONE                0
#define LIO_REQTYPE_NORESP_NET          1
#define LIO_REQTYPE_NORESP_NET_SG       2
#define LIO_REQTYPE_SOFT_COMMAND        3
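
/* One request-list entry per IQ slot: the host buffer posted at that slot
 * and its request type, so the buffer can be freed once the device has
 * fetched the command.
 */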
struct lio_request_list {
        uint32_t reqtype;
        void *buf;
};
/*---------------------- INSTRUCTION FORMAT ----------------------------*/

struct lio_instr3_64B {
        /** Pointer where the input data is available. */
        uint64_t dptr;

        /** Instruction Header. */
        uint64_t ih3;

        /** PKI Instruction Header. */
        uint64_t pki_ih3;

        /** Input Request Header. */
        uint64_t irh;

        /** opcode/subcode specific parameters */
        uint64_t ossp[2];

        /** Return Data Parameters */
        uint64_t rdp;

        /** Pointer where the response for a RAW mode packet will be written
         *  by Octeon.
         */
        uint64_t rptr;
};

union lio_instr_64B {
        struct lio_instr3_64B cmd3;
};
/** The size of each buffer in soft command buffer pool */
#define LIO_SOFT_COMMAND_BUFFER_SIZE 1536
@@ -67,6 +109,9 @@ struct lio_soft_command {
        uint64_t dma_addr;
        uint32_t size;

        /** Command and return status */
        union lio_instr_64B cmd;

#define LIO_COMPLETION_WORD_INIT 0xffffffffffffffffULL
        uint64_t *status_word;
@@ -93,6 +138,230 @@ struct lio_soft_command {
        struct rte_mbuf *mbuf;
};
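
/* Result of post_command2(): the post status and the ring index that was
 * consumed, which lio_send_command() files on the request list.
 */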
struct lio_iq_post_status {
        int status;
        int index;
};
/* wqe
 *  ---------------  0
 * |  wqe word0-3  |
 *  ---------------  32
 * |    PCI IH     |
 *  ---------------  40
 * |     RPTR      |
 *  ---------------  48
 * |    PCI IRH    |
 *  ---------------  56
 * |  OCTEON_CMD   |
 *  ---------------  64
 * | Addtl 8B Data |
 * |               |
 *  ---------------
 */
union octeon_cmd {
        uint64_t cmd64;

        struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                uint64_t cmd : 5;
                uint64_t more : 6; /* How many udd words follow the command */
                uint64_t reserved : 29;
                uint64_t param1 : 16;
                uint64_t param2 : 8;
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
                uint64_t param2 : 8;
                uint64_t param1 : 16;
                uint64_t reserved : 29;
                uint64_t more : 6;
                uint64_t cmd : 5;
#endif
        } s;
};
#define OCTEON_CMD_SIZE (sizeof(union octeon_cmd))
/* Instruction Header */
struct octeon_instr_ih3 {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        /** Reserved3 */
        uint64_t reserved3 : 1;
        /** Gather indicator 1=gather */
        uint64_t gather : 1;
        /** Data length OR no. of entries in gather list */
        uint64_t dlengsz : 14;
        /** Front Data size */
        uint64_t fsz : 6;
        /** Reserved2 */
        uint64_t reserved2 : 4;
        /** PKI port kind - PKIND */
        uint64_t pkind : 6;
        /** Reserved1 */
        uint64_t reserved1 : 32;
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
        /** Reserved1 */
        uint64_t reserved1 : 32;
        /** PKI port kind - PKIND */
        uint64_t pkind : 6;
        /** Reserved2 */
        uint64_t reserved2 : 4;
        /** Front Data size */
        uint64_t fsz : 6;
        /** Data length OR no. of entries in gather list */
        uint64_t dlengsz : 14;
        /** Gather indicator 1=gather */
        uint64_t gather : 1;
        /** Reserved3 */
        uint64_t reserved3 : 1;
#endif
};
/* PKI Instruction Header (PKI IH) */
struct octeon_instr_pki_ih3 {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        /** Wider bit */
        uint64_t w : 1;
        /** Raw mode indicator 1 = RAW */
        uint64_t raw : 1;
        /** Use Tag */
        uint64_t utag : 1;
        /** Use QPG */
        uint64_t uqpg : 1;
        /** Reserved2 */
        uint64_t reserved2 : 1;
        /** Parse Mode */
        uint64_t pm : 3;
        /** Skip Length */
        uint64_t sl : 8;
        /** Use Tag Type */
        uint64_t utt : 1;
        /** Tag type */
        uint64_t tagtype : 2;
        /** Reserved1 */
        uint64_t reserved1 : 2;
        /** QPG Value */
        uint64_t qpg : 11;
        /** Tag Value */
        uint64_t tag : 32;
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
        /** Tag Value */
        uint64_t tag : 32;
        /** QPG Value */
        uint64_t qpg : 11;
        /** Reserved1 */
        uint64_t reserved1 : 2;
        /** Tag type */
        uint64_t tagtype : 2;
        /** Use Tag Type */
        uint64_t utt : 1;
        /** Skip Length */
        uint64_t sl : 8;
        /** Parse Mode */
        uint64_t pm : 3;
        /** Reserved2 */
        uint64_t reserved2 : 1;
        /** Use QPG */
        uint64_t uqpg : 1;
        /** Use Tag */
        uint64_t utag : 1;
        /** Raw mode indicator 1 = RAW */
        uint64_t raw : 1;
        /** Wider bit */
        uint64_t w : 1;
#endif
};
/** Input Request Header */
struct octeon_instr_irh {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        uint64_t opcode : 4;
        uint64_t rflag : 1;
        uint64_t subcode : 7;
        uint64_t vlan : 12;
        uint64_t priority : 3;
        uint64_t reserved : 5;
        uint64_t ossp : 32; /* opcode/subcode specific parameters */
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
        uint64_t ossp : 32; /* opcode/subcode specific parameters */
        uint64_t reserved : 5;
        uint64_t priority : 3;
        uint64_t vlan : 12;
        uint64_t subcode : 7;
        uint64_t rflag : 1;
        uint64_t opcode : 4;
#endif
};
/* pki_ih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 48 bytes */
#define OCTEON_SOFT_CMD_RESP_IH3 (40 + 8)

/* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
#define OCTEON_PCI_CMD_O3 (24 + 8)
/** Return Data Parameters */
struct octeon_instr_rdp {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        uint64_t reserved : 49;
        uint64_t pcie_port : 3;
        uint64_t rlen : 12;
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
        uint64_t rlen : 12;
        uint64_t pcie_port : 3;
        uint64_t reserved : 49;
#endif
};
int lio_setup_sc_buffer_pool(struct lio_device *lio_dev);
void lio_free_sc_buffer_pool(struct lio_device *lio_dev);
@@ -100,6 +369,13 @@ struct lio_soft_command *
lio_alloc_soft_command(struct lio_device *lio_dev,
                       uint32_t datasize, uint32_t rdatasize,
                       uint32_t ctxsize);

void lio_prepare_soft_command(struct lio_device *lio_dev,
                              struct lio_soft_command *sc,
                              uint8_t opcode, uint8_t subcode,
                              uint32_t irh_ossp, uint64_t ossp0,
                              uint64_t ossp1);

int lio_send_soft_command(struct lio_device *lio_dev,
                          struct lio_soft_command *sc);

void lio_free_soft_command(struct lio_soft_command *sc);
/** Maximum ordered requests to process in every invocation of
@@ -167,6 +443,21 @@ lio_swap_8B_data(uint64_t *data, uint32_t blocks)
        }
}
/* Increment a queue index.
 * Index is incremented by count; if the sum exceeds
 * max, index is wrapped-around to the start.
 */
static inline uint32_t
lio_incr_index(uint32_t index, uint32_t count, uint32_t max)
{
        if ((index + count) >= max)
                index = index + count - max;
        else
                index += count;

        return index;
}
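
/* e.g. lio_incr_index(3, 2, 4) == 1: the sum 5 reaches max 4, so the
 * index wraps around to the start of the ring.
 */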
/** Setup instruction queue zero for the device
 *  @param lio_dev which lio device to setup
 *


@@ -288,6 +288,12 @@ struct lio_device {
        uint16_t pf_num;
        uint16_t vf_num;

        /** This device's PCIe port used for traffic. */
        uint16_t pcie_port;

        /** The state of this device */
        rte_atomic64_t status;

        uint8_t *hw_addr;

        struct lio_fn_list fn_list;