Revert r270808, which made changes to common code (i40e_alloc.h).

Rather than #define-ing common code function calls to OS-dependent ones,
make the osdep versions match the common code expectations, adjust the
FreeBSD-specific code to use those, and remove the #defines. In the
FreeBSD-specific code, use "i40e_mem_reserved" for the now expected but
unused argument to i40e_allocate_dma_mem().

Reviewed by:	gnn, eric.joyner intel.com
MFC after:	3 days
parent 9bce9009cd
commit d94ca7cf07

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=271834
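For reference, below is a minimal, stand-alone user-space sketch (not the driver code itself) of the calling convention this revert restores: the allocator keeps the enum i40e_memory_type argument the common code expects, the implementation ignores it, and callers pass i40e_mem_reserved. Only the names i40e_allocate_dma_mem and i40e_mem_reserved come from the commit; the stand-in types and the posix_memalign-based body are simplified assumptions for illustration.

/*
 * Hypothetical sketch of the restored calling convention.  Types and the
 * allocator body are simplified stand-ins, not the kernel implementation.
 */
#define _POSIX_C_SOURCE 200112L
#include <stdlib.h>
#include <string.h>

typedef unsigned int u32;
typedef unsigned long long u64;
typedef int i40e_status;

enum i40e_memory_type { i40e_mem_reserved = 0 };

struct i40e_dma_mem { void *va; u64 size; };
struct i40e_hw { void *back; };

i40e_status
i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
    enum i40e_memory_type type, u64 size, u32 alignment)
{
	(void)hw;
	(void)type;	/* accepted to match the common-code prototype, unused here */
	if (posix_memalign(&mem->va, alignment, size) != 0)
		return (1);
	memset(mem->va, 0, size);
	mem->size = size;
	return (0);
}

int
main(void)
{
	struct i40e_hw hw = { NULL };
	struct i40e_dma_mem dma;

	/* Callers pass i40e_mem_reserved for the now-unused type argument. */
	return (i40e_allocate_dma_mem(&hw, &dma, i40e_mem_reserved,
	    4096, 4096));
}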
@@ -51,15 +51,16 @@ enum i40e_memory_type {
 };
 
 /* prototype for functions used for dynamic memory allocation */
-enum i40e_status_code i40e_allocate_dma(struct i40e_hw *hw,
+enum i40e_status_code i40e_allocate_dma_mem(struct i40e_hw *hw,
					struct i40e_dma_mem *mem,
-					bus_size_t size, u32 alignment);
-enum i40e_status_code i40e_free_dma(struct i40e_hw *hw,
+					enum i40e_memory_type type,
+					u64 size, u32 alignment);
+enum i40e_status_code i40e_free_dma_mem(struct i40e_hw *hw,
					struct i40e_dma_mem *mem);
-enum i40e_status_code i40e_allocate_virt(struct i40e_hw *hw,
+enum i40e_status_code i40e_allocate_virt_mem(struct i40e_hw *hw,
					struct i40e_virt_mem *mem,
					u32 size);
-enum i40e_status_code i40e_free_virt(struct i40e_hw *hw,
+enum i40e_status_code i40e_free_virt_mem(struct i40e_hw *hw,
					struct i40e_virt_mem *mem);
 
 #endif /* _I40E_ALLOC_H_ */
@@ -49,22 +49,22 @@ i40e_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
 }
 
 i40e_status
-i40e_allocate_virt(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size)
+i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size)
 {
	mem->va = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	return(mem->va == NULL);
 }
 
 i40e_status
-i40e_free_virt(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
 {
	free(mem->va, M_DEVBUF);
	return(0);
 }
 
 i40e_status
-i40e_allocate_dma(struct i40e_hw *hw, struct i40e_dma_mem *mem,
-	bus_size_t size, u32 alignment)
+i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+	enum i40e_memory_type type __unused, u64 size, u32 alignment)
 {
	device_t	dev = ((struct i40e_osdep *)hw->back)->dev;
	int		err;
@@ -122,7 +122,7 @@ i40e_allocate_dma(struct i40e_hw *hw, struct i40e_dma_mem *mem,
 }
 
 i40e_status
-i40e_free_dma(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
 {
	bus_dmamap_sync(mem->tag, mem->map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
@@ -170,9 +170,6 @@ struct i40e_hw; /* forward decl */
 u16	i40e_read_pci_cfg(struct i40e_hw *, u32);
 void	i40e_write_pci_cfg(struct i40e_hw *, u32, u16);
 
-#define i40e_allocate_dma_mem(h, m, unused, s, a) i40e_allocate_dma(h, m, s, a)
-#define i40e_free_dma_mem(h, m) i40e_free_dma(h, m)
-
 #define i40e_debug(h, m, s, ...)  i40e_debug_d(h, m, s, ##__VA_ARGS__)
 extern void i40e_debug_d(void *hw, u32 mask, char *fmt_str, ...);
 
@@ -180,8 +177,6 @@ struct i40e_virt_mem {
	void *va;
	u32 size;
 };
-#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt(h, m, s)
-#define i40e_free_virt_mem(h, m) i40e_free_virt(h, m)
 
 /*
 ** This hardware supports either 16 or 32 byte rx descriptors
@@ -2591,7 +2591,7 @@ ixl_free_vsi(struct ixl_vsi *vsi)
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
-			i40e_free_dma(&pf->hw, &txr->dma);
+			i40e_free_dma_mem(&pf->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);
 
@@ -2600,7 +2600,7 @@ ixl_free_vsi(struct ixl_vsi *vsi)
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
-			i40e_free_dma(&pf->hw, &rxr->dma);
+			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
 
@@ -2668,8 +2668,8 @@ ixl_setup_stations(struct ixl_pf *pf)
		tsize = roundup2((que->num_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
-		if (i40e_allocate_dma(&pf->hw,
-		    &txr->dma, tsize, DBA_ALIGN)) {
+		if (i40e_allocate_dma_mem(&pf->hw,
+		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
@@ -2708,8 +2708,8 @@ ixl_setup_stations(struct ixl_pf *pf)
		    device_get_nameunit(dev), que->me);
		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
 
-		if (i40e_allocate_dma(&pf->hw,
-		    &rxr->dma, rsize, 4096)) {
+		if (i40e_allocate_dma_mem(&pf->hw,
+		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
			device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
@@ -2735,9 +2735,9 @@ ixl_setup_stations(struct ixl_pf *pf)
		rxr = &que->rxr;
		txr = &que->txr;
		if (rxr->base)
-			i40e_free_dma(&pf->hw, &rxr->dma);
+			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		if (txr->base)
-			i40e_free_dma(&pf->hw, &txr->dma);
+			i40e_free_dma_mem(&pf->hw, &txr->dma);
	}
 
 early:
@@ -1457,8 +1457,8 @@ ixlv_setup_queues(struct ixlv_sc *sc)
		tsize = roundup2((que->num_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
-		if (i40e_allocate_dma(&sc->hw,
-		    &txr->dma, tsize, DBA_ALIGN)) {
+		if (i40e_allocate_dma_mem(&sc->hw,
+		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
@@ -1497,8 +1497,8 @@ ixlv_setup_queues(struct ixlv_sc *sc)
		    device_get_nameunit(dev), que->me);
		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
 
-		if (i40e_allocate_dma(&sc->hw,
-		    &rxr->dma, rsize, 4096)) { //JFV - should this be DBA?
+		if (i40e_allocate_dma_mem(&sc->hw,
+		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
			device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
@@ -1525,9 +1525,9 @@ ixlv_setup_queues(struct ixlv_sc *sc)
		rxr = &que->rxr;
		txr = &que->txr;
		if (rxr->base)
-			i40e_free_dma(&sc->hw, &rxr->dma);
+			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		if (txr->base)
-			i40e_free_dma(&sc->hw, &txr->dma);
+			i40e_free_dma_mem(&sc->hw, &txr->dma);
	}
 
 early:
@@ -2346,7 +2346,7 @@ ixlv_free_queues(struct ixl_vsi *vsi)
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
-			i40e_free_dma(&sc->hw, &txr->dma);
+			i40e_free_dma_mem(&sc->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);
 
@@ -2355,7 +2355,7 @@ ixlv_free_queues(struct ixl_vsi *vsi)
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
-			i40e_free_dma(&sc->hw, &rxr->dma);
+			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);