ioat_spec: add spdk_ prefixes

Change-Id: I91444f10b98d7e247af5eb2fea719e283a1156a2
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Daniel Verkamp 2016-02-11 10:39:28 -07:00
parent 6ce73aa6e7
commit c7150a5611
3 changed files with 84 additions and 84 deletions


@@ -31,37 +31,37 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __IOAT_SPEC_H__
#define __IOAT_SPEC_H__
#ifndef SPDK_IOAT_SPEC_H
#define SPDK_IOAT_SPEC_H
#include <inttypes.h>
#include "spdk/assert.h"
#define IOAT_INTRCTRL_MASTER_INT_EN 0x01
#define SPDK_IOAT_INTRCTRL_MASTER_INT_EN 0x01
#define IOAT_VER_3_0 0x30
#define IOAT_VER_3_3 0x33
#define SPDK_IOAT_VER_3_0 0x30
#define SPDK_IOAT_VER_3_3 0x33
/* DMA Channel Registers */
#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000
#define IOAT_CHANCTRL_COMPL_DCA_EN 0x0200
#define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100
#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020
#define IOAT_CHANCTRL_ERR_INT_EN 0x0010
#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
#define IOAT_CHANCTRL_INT_REARM 0x0001
#define SPDK_IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000
#define SPDK_IOAT_CHANCTRL_COMPL_DCA_EN 0x0200
#define SPDK_IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100
#define SPDK_IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020
#define SPDK_IOAT_CHANCTRL_ERR_INT_EN 0x0010
#define SPDK_IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008
#define SPDK_IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
#define SPDK_IOAT_CHANCTRL_INT_REARM 0x0001
/* DMA Channel Capabilities */
#define IOAT_DMACAP_PB (1 << 0)
#define IOAT_DMACAP_DCA (1 << 4)
#define IOAT_DMACAP_BFILL (1 << 6)
#define IOAT_DMACAP_XOR (1 << 8)
#define IOAT_DMACAP_PQ (1 << 9)
#define IOAT_DMACAP_DMA_DIF (1 << 10)
#define SPDK_IOAT_DMACAP_PB (1 << 0)
#define SPDK_IOAT_DMACAP_DCA (1 << 4)
#define SPDK_IOAT_DMACAP_BFILL (1 << 6)
#define SPDK_IOAT_DMACAP_XOR (1 << 8)
#define SPDK_IOAT_DMACAP_PQ (1 << 9)
#define SPDK_IOAT_DMACAP_DMA_DIF (1 << 10)
struct ioat_registers {
struct spdk_ioat_registers {
uint8_t chancnt;
uint8_t xfercap;
uint8_t genctrl;
@@ -86,24 +86,24 @@ struct ioat_registers {
uint32_t chanerrmask; /* 0xAC */
} __attribute__((packed));
#define IOAT_CHANCMD_RESET 0x20
#define IOAT_CHANCMD_SUSPEND 0x04
#define SPDK_IOAT_CHANCMD_RESET 0x20
#define SPDK_IOAT_CHANCMD_SUSPEND 0x04
#define IOAT_CHANSTS_STATUS 0x7ULL
#define IOAT_CHANSTS_ACTIVE 0x0
#define IOAT_CHANSTS_IDLE 0x1
#define IOAT_CHANSTS_SUSPENDED 0x2
#define IOAT_CHANSTS_HALTED 0x3
#define IOAT_CHANSTS_ARMED 0x4
#define SPDK_IOAT_CHANSTS_STATUS 0x7ULL
#define SPDK_IOAT_CHANSTS_ACTIVE 0x0
#define SPDK_IOAT_CHANSTS_IDLE 0x1
#define SPDK_IOAT_CHANSTS_SUSPENDED 0x2
#define SPDK_IOAT_CHANSTS_HALTED 0x3
#define SPDK_IOAT_CHANSTS_ARMED 0x4
#define IOAT_CHANSTS_UNAFFILIATED_ERROR 0x8ULL
#define IOAT_CHANSTS_SOFT_ERROR 0x10ULL
#define SPDK_IOAT_CHANSTS_UNAFFILIATED_ERROR 0x8ULL
#define SPDK_IOAT_CHANSTS_SOFT_ERROR 0x10ULL
#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK (~0x3FULL)
#define SPDK_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK (~0x3FULL)
#define IOAT_CHANCMP_ALIGN 8 /* CHANCMP address must be 64-bit aligned */
#define SPDK_IOAT_CHANCMP_ALIGN 8 /* CHANCMP address must be 64-bit aligned */
struct ioat_generic_hw_descriptor {
struct spdk_ioat_generic_hw_desc {
uint32_t size;
union {
uint32_t control_raw;
@@ -129,7 +129,7 @@ struct ioat_generic_hw_descriptor {
uint64_t op_specific[4];
};
struct ioat_dma_hw_descriptor {
struct spdk_ioat_dma_hw_desc {
uint32_t size;
union {
uint32_t control_raw;
@@ -146,7 +146,7 @@ struct ioat_dma_hw_descriptor {
uint32_t dest_dca: 1;
uint32_t hint: 1;
uint32_t reserved: 13;
#define IOAT_OP_COPY 0x00
#define SPDK_IOAT_OP_COPY 0x00
uint32_t op: 8;
} control;
} u;
@@ -159,7 +159,7 @@ struct ioat_dma_hw_descriptor {
uint64_t user2;
};
struct ioat_fill_hw_descriptor {
struct spdk_ioat_fill_hw_desc {
uint32_t size;
union {
uint32_t control_raw;
@@ -173,7 +173,7 @@ struct ioat_fill_hw_descriptor {
uint32_t dest_page_break: 1;
uint32_t bundle: 1;
uint32_t reserved3: 15;
#define IOAT_OP_FILL 0x01
#define SPDK_IOAT_OP_FILL 0x01
uint32_t op: 8;
} control;
} u;
@@ -186,7 +186,7 @@ struct ioat_fill_hw_descriptor {
uint64_t user2;
};
struct ioat_xor_hw_descriptor {
struct spdk_ioat_xor_hw_desc {
uint32_t size;
union {
uint32_t control_raw;
@@ -201,8 +201,8 @@ struct ioat_xor_hw_descriptor {
uint32_t dest_dca: 1;
uint32_t hint: 1;
uint32_t reserved: 13;
#define IOAT_OP_XOR 0x87
#define IOAT_OP_XOR_VAL 0x88
#define SPDK_IOAT_OP_XOR 0x87
#define SPDK_IOAT_OP_XOR_VAL 0x88
uint32_t op: 8;
} control;
} u;
@@ -215,7 +215,7 @@ struct ioat_xor_hw_descriptor {
uint64_t src_addr5;
};
struct ioat_xor_ext_hw_descriptor {
struct spdk_ioat_xor_ext_hw_desc {
uint64_t src_addr6;
uint64_t src_addr7;
uint64_t src_addr8;
@@ -223,7 +223,7 @@ struct ioat_xor_ext_hw_descriptor {
uint64_t reserved[4];
};
struct ioat_pq_hw_descriptor {
struct spdk_ioat_pq_hw_desc {
uint32_t size;
union {
uint32_t control_raw;
@ -240,8 +240,8 @@ struct ioat_pq_hw_descriptor {
uint32_t p_disable: 1;
uint32_t q_disable: 1;
uint32_t reserved: 11;
#define IOAT_OP_PQ 0x89
#define IOAT_OP_PQ_VAL 0x8a
#define SPDK_IOAT_OP_PQ 0x89
#define SPDK_IOAT_OP_PQ_VAL 0x8a
uint32_t op: 8;
} control;
} u;
@@ -254,7 +254,7 @@ struct ioat_pq_hw_descriptor {
uint64_t q_addr;
};
struct ioat_pq_ext_hw_descriptor {
struct spdk_ioat_pq_ext_hw_desc {
uint64_t src_addr4;
uint64_t src_addr5;
uint64_t src_addr6;
@@ -264,7 +264,7 @@ struct ioat_pq_ext_hw_descriptor {
uint64_t reserved[2];
};
struct ioat_pq_update_hw_descriptor {
struct spdk_ioat_pq_update_hw_desc {
uint32_t size;
union {
uint32_t control_raw;
@@ -282,7 +282,7 @@ struct ioat_pq_update_hw_descriptor {
uint32_t q_disable: 1;
uint32_t reserved: 3;
uint32_t coef: 8;
#define IOAT_OP_PQ_UP 0x8b
#define SPDK_IOAT_OP_PQ_UP 0x8b
uint32_t op: 8;
} control;
} u;
@@ -295,21 +295,21 @@ struct ioat_pq_update_hw_descriptor {
uint64_t q_addr;
};
struct ioat_raw_hw_descriptor {
struct spdk_ioat_raw_hw_desc {
uint64_t field[8];
};
union ioat_hw_descriptor {
struct ioat_raw_hw_descriptor raw;
struct ioat_generic_hw_descriptor generic;
struct ioat_dma_hw_descriptor dma;
struct ioat_fill_hw_descriptor fill;
struct ioat_xor_hw_descriptor xor;
struct ioat_xor_ext_hw_descriptor xor_ext;
struct ioat_pq_hw_descriptor pq;
struct ioat_pq_ext_hw_descriptor pq_ext;
struct ioat_pq_update_hw_descriptor pq_update;
union spdk_ioat_hw_desc {
struct spdk_ioat_raw_hw_desc raw;
struct spdk_ioat_generic_hw_desc generic;
struct spdk_ioat_dma_hw_desc dma;
struct spdk_ioat_fill_hw_desc fill;
struct spdk_ioat_xor_hw_desc xor;
struct spdk_ioat_xor_ext_hw_desc xor_ext;
struct spdk_ioat_pq_hw_desc pq;
struct spdk_ioat_pq_ext_hw_desc pq_ext;
struct spdk_ioat_pq_update_hw_desc pq_update;
};
SPDK_STATIC_ASSERT(sizeof(union ioat_hw_descriptor) == 64, "incorrect ioat_hw_descriptor layout");
SPDK_STATIC_ASSERT(sizeof(union spdk_ioat_hw_desc) == 64, "incorrect spdk_ioat_hw_desc layout");
#endif /* __IOAT_SPEC_H__ */
#endif /* SPDK_IOAT_SPEC_H */
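
Illustration (a sketch, not part of this commit): with the renamed header, a caller hand-building a DMA copy descriptor would use the spdk_-prefixed union and op code roughly as below. Only the names visible in the hunks above come from the header; the include path and the src_addr/dest_addr field names are assumptions.

#include <string.h>
#include "spdk/ioat_spec.h" /* include path assumed */

static void
fill_copy_desc(union spdk_ioat_hw_desc *hw_desc, uint64_t dst, uint64_t src, uint32_t len)
{
	memset(hw_desc, 0, sizeof(*hw_desc));          /* clears control_raw and all other fields */
	hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY; /* renamed from IOAT_OP_COPY */
	hw_desc->dma.u.control.completion_update = 1;  /* request status writeback on completion */
	hw_desc->dma.size = len;
	hw_desc->dma.src_addr = src;                   /* field name assumed; outside the excerpt above */
	hw_desc->dma.dest_addr = dst;                  /* field name assumed; outside the excerpt above */
}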


@@ -74,13 +74,13 @@ ioat_write_chainaddr(struct ioat_channel *ioat, uint64_t addr)
static inline void
ioat_suspend(struct ioat_channel *ioat)
{
ioat->regs->chancmd = IOAT_CHANCMD_SUSPEND;
ioat->regs->chancmd = SPDK_IOAT_CHANCMD_SUSPEND;
}
static inline void
ioat_reset(struct ioat_channel *ioat)
{
ioat->regs->chancmd = IOAT_CHANCMD_RESET;
ioat->regs->chancmd = SPDK_IOAT_CHANCMD_RESET;
}
static inline uint32_t
@@ -89,7 +89,7 @@ ioat_reset_pending(struct ioat_channel *ioat)
uint8_t cmd;
cmd = ioat->regs->chancmd;
return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
return (cmd & SPDK_IOAT_CHANCMD_RESET) == SPDK_IOAT_CHANCMD_RESET;
}
static int
@@ -106,7 +106,7 @@ ioat_map_pci_bar(struct ioat_channel *ioat)
return -1;
}
ioat->regs = (volatile struct ioat_registers *)addr;
ioat->regs = (volatile struct spdk_ioat_registers *)addr;
return 0;
}
@@ -145,7 +145,7 @@ ioat_get_ring_index(struct ioat_channel *ioat, uint32_t index)
static void
ioat_get_ring_entry(struct ioat_channel *ioat, uint32_t index,
struct ioat_descriptor **desc,
union ioat_hw_descriptor **hw_desc)
union spdk_ioat_hw_desc **hw_desc)
{
uint32_t i = ioat_get_ring_index(ioat, index);
@@ -157,7 +157,7 @@ static uint64_t
ioat_get_desc_phys_addr(struct ioat_channel *ioat, uint32_t index)
{
return ioat->hw_ring_phys_addr +
ioat_get_ring_index(ioat, index) * sizeof(union ioat_hw_descriptor);
ioat_get_ring_index(ioat, index) * sizeof(union spdk_ioat_hw_desc);
}
static void
@@ -176,7 +176,7 @@ static struct ioat_descriptor *
ioat_prep_null(struct ioat_channel *ioat)
{
struct ioat_descriptor *desc;
union ioat_hw_descriptor *hw_desc;
union spdk_ioat_hw_desc *hw_desc;
if (ioat_get_ring_space(ioat) < 1) {
return NULL;
@@ -185,7 +185,7 @@ ioat_prep_null(struct ioat_channel *ioat)
ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);
hw_desc->dma.u.control_raw = 0;
hw_desc->dma.u.control.op = IOAT_OP_COPY;
hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
hw_desc->dma.u.control.null = 1;
hw_desc->dma.u.control.completion_update = 1;
@@ -206,7 +206,7 @@ ioat_prep_copy(struct ioat_channel *ioat, uint64_t dst,
uint64_t src, uint32_t len)
{
struct ioat_descriptor *desc;
union ioat_hw_descriptor *hw_desc;
union spdk_ioat_hw_desc *hw_desc;
ioat_assert(len <= ioat->max_xfer_size);
@@ -217,7 +217,7 @@ ioat_prep_copy(struct ioat_channel *ioat, uint64_t dst,
ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);
hw_desc->dma.u.control_raw = 0;
hw_desc->dma.u.control.op = IOAT_OP_COPY;
hw_desc->dma.u.control.op = SPDK_IOAT_OP_COPY;
hw_desc->dma.u.control.completion_update = 1;
hw_desc->dma.size = len;
@@ -237,7 +237,7 @@ ioat_prep_fill(struct ioat_channel *ioat, uint64_t dst,
uint64_t fill_pattern, uint32_t len)
{
struct ioat_descriptor *desc;
union ioat_hw_descriptor *hw_desc;
union spdk_ioat_hw_desc *hw_desc;
ioat_assert(len <= ioat->max_xfer_size);
@@ -248,7 +248,7 @@ ioat_prep_fill(struct ioat_channel *ioat, uint64_t dst,
ioat_get_ring_entry(ioat, ioat->head, &desc, &hw_desc);
hw_desc->fill.u.control_raw = 0;
hw_desc->fill.u.control.op = IOAT_OP_FILL;
hw_desc->fill.u.control.op = SPDK_IOAT_OP_FILL;
hw_desc->fill.u.control.completion_update = 1;
hw_desc->fill.size = len;
@@ -319,7 +319,7 @@ ioat_process_channel_events(struct ioat_channel *ioat)
}
status = *ioat->comp_update;
completed_descriptor = status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;
completed_descriptor = status & SPDK_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;
if (is_ioat_halted(status)) {
ioat_printf(ioat, "%s: Channel halted (%x)\n", __func__, ioat->regs->chanerr);
@@ -381,7 +381,7 @@ ioat_channel_start(struct ioat_channel *ioat)
}
version = ioat->regs->cbver;
if (version < IOAT_VER_3_0) {
if (version < SPDK_IOAT_VER_3_0) {
ioat_printf(ioat, "%s: unsupported IOAT version %u.%u\n",
__func__, version >> 4, version & 0xF);
return -1;
@@ -389,7 +389,7 @@ ioat_channel_start(struct ioat_channel *ioat)
/* Always support DMA copy */
ioat->dma_capabilities = IOAT_ENGINE_COPY_SUPPORTED;
if (ioat->regs->dmacapability & IOAT_DMACAP_BFILL)
if (ioat->regs->dmacapability & SPDK_IOAT_DMACAP_BFILL)
ioat->dma_capabilities |= IOAT_ENGINE_FILL_SUPPORTED;
xfercap = ioat->regs->xfercap;
@@ -406,7 +406,7 @@ ioat_channel_start(struct ioat_channel *ioat)
ioat->max_xfer_size = 1U << xfercap;
}
ioat->comp_update = ioat_zmalloc(NULL, sizeof(*ioat->comp_update), IOAT_CHANCMP_ALIGN,
ioat->comp_update = ioat_zmalloc(NULL, sizeof(*ioat->comp_update), SPDK_IOAT_CHANCMP_ALIGN,
&comp_update_bus_addr);
if (ioat->comp_update == NULL) {
return -1;
@@ -421,7 +421,7 @@ ioat_channel_start(struct ioat_channel *ioat)
return -1;
}
ioat->hw_ring = ioat_zmalloc(NULL, num_descriptors * sizeof(union ioat_hw_descriptor), 64,
ioat->hw_ring = ioat_zmalloc(NULL, num_descriptors * sizeof(union spdk_ioat_hw_desc), 64,
&ioat->hw_ring_phys_addr);
if (!ioat->hw_ring) {
return -1;
@@ -437,7 +437,7 @@ ioat_channel_start(struct ioat_channel *ioat)
ioat_reset_hw(ioat);
ioat->regs->chanctrl = IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
ioat->regs->chanctrl = SPDK_IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
ioat_write_chancmp(ioat, comp_update_bus_addr);
ioat_write_chainaddr(ioat, ioat->hw_ring_phys_addr);

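For context (a sketch, not code from this commit): the completion status value read above packs the channel state into its low bits and the physical address of the last completed descriptor into the remaining bits, which is why the driver masks with SPDK_IOAT_CHANSTS_STATUS in one place and SPDK_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK in another.

/* decomposing one completion-writeback value; 'ioat' is a channel as in the code above */
uint64_t status    = *ioat->comp_update;
uint64_t state     = status & SPDK_IOAT_CHANSTS_STATUS;                    /* ACTIVE, IDLE, SUSPENDED, HALTED or ARMED */
uint64_t last_desc = status & SPDK_IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK; /* address of the last completed descriptor */
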

@@ -61,7 +61,7 @@ struct ioat_channel {
/* Opaque handle to upper layer */
void *device;
uint64_t max_xfer_size;
volatile struct ioat_registers *regs;
volatile struct spdk_ioat_registers *regs;
volatile uint64_t *comp_update;
@@ -72,7 +72,7 @@ struct ioat_channel {
uint64_t last_seen;
struct ioat_descriptor *ring;
union ioat_hw_descriptor *hw_ring;
union spdk_ioat_hw_desc *hw_ring;
uint64_t hw_ring_phys_addr;
uint32_t dma_capabilities;
@@ -83,25 +83,25 @@ struct ioat_channel {
static inline uint32_t
is_ioat_active(uint64_t status)
{
return (status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE;
return (status & SPDK_IOAT_CHANSTS_STATUS) == SPDK_IOAT_CHANSTS_ACTIVE;
}
static inline uint32_t
is_ioat_idle(uint64_t status)
{
return (status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_IDLE;
return (status & SPDK_IOAT_CHANSTS_STATUS) == SPDK_IOAT_CHANSTS_IDLE;
}
static inline uint32_t
is_ioat_halted(uint64_t status)
{
return (status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED;
return (status & SPDK_IOAT_CHANSTS_STATUS) == SPDK_IOAT_CHANSTS_HALTED;
}
static inline uint32_t
is_ioat_suspended(uint64_t status)
{
return (status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED;
return (status & SPDK_IOAT_CHANSTS_STATUS) == SPDK_IOAT_CHANSTS_SUSPENDED;
}
#endif /* __IOAT_INTERNAL_H__ */
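
How the renamed status helpers are typically combined (a hedged sketch; the polling loop and the missing timeout are assumptions, not code from this commit):

/* quiesce a channel, then spin until the hardware reports it */
uint64_t status;

ioat_suspend(ioat);
do {
	status = *ioat->comp_update; /* completion writeback mirrors CHANSTS */
} while (!is_ioat_suspended(status) && !is_ioat_idle(status) && !is_ioat_halted(status));
/* a real caller would bound this loop with a timeout and check chanerr when halted */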