pci: rework interrupt handling
Signed-off-by: Intel <intel.com>
parent eee16c964c
commit 0a45657a67
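In short (a summary of the hunks below): interrupt sources and ethdev event callbacks gain an `active` flag; the EAL interrupt thread marks a source active and drops its spinlock only around each callback invocation, copying one callback at a time instead of snapshotting a fixed-size array; unregistering while callbacks are executing now returns -EAGAIN instead of mutating the list under a running callback; and the igb/ixgbe PMDs stop writing LSC bits directly, instead accumulating causes in `intr->mask` via dedicated `*_lsc_interrupt_setup()` helpers that a single `*_intr_enable()` programs into hardware.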
@@ -171,13 +171,16 @@ static void
 test_interrupt_callback(struct rte_intr_handle *intr_handle, void *arg)
 {
     if (test_interrupt_handle_sanity_check(intr_handle) < 0) {
-        printf("null or invalid intr_handle for %s\n", __FUNCTION__);
+        printf("null or invalid intr_handle for %s\n", __func__);
         flag = -1;
         return;
     }
 
     if (rte_intr_callback_unregister(intr_handle,
-            test_interrupt_callback, arg) <= 0) {
-        printf("fail to unregister callback\n");
+            test_interrupt_callback, arg) >= 0) {
+        printf("%s: unexpectedly able to unregister itself\n",
+            __func__);
         flag = -1;
         return;
     }
@@ -188,15 +191,12 @@ test_interrupt_callback(struct rte_intr_handle *intr_handle, void *arg)
 }
 
 static void
-test_interrupt_callback_1(struct rte_intr_handle *intr_handle, void *arg)
+test_interrupt_callback_1(struct rte_intr_handle *intr_handle,
+    __attribute__((unused)) void *arg)
 {
     if (test_interrupt_handle_sanity_check(intr_handle) < 0) {
-        printf("null or invalid intr_handle for %s\n", __FUNCTION__);
-        return;
-    }
-    if (rte_intr_callback_unregister(intr_handle,
-            test_interrupt_callback_1, arg) <= 0) {
-        printf("fail to unregister callback\n");
+        printf("null or invalid intr_handle for %s\n", __func__);
         flag = -1;
         return;
     }
 }
@@ -281,7 +281,7 @@ test_interrupt_disable(void)
 int
 test_interrupt(void)
 {
-    int count = 0, ret = -1;
+    int count, ret;
     struct rte_intr_handle test_intr_handle;
 
     if (test_interrupt_init() < 0) {
@@ -291,6 +291,8 @@ test_interrupt(void)
 
     printf("check if callback registered can be called\n");
 
+    ret = -1;
+
     /* check if callback registered can be called */
     flag = 0;
     test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
@@ -305,13 +307,28 @@ test_interrupt(void)
         goto out;
     }
     /* check flag in 3 seconds */
-    while (flag == 0 && count++ < 3)
+    for (count = 0; flag == 0 && count < 3; count++)
         rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
 
+    rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
+
+    if ((ret = rte_intr_callback_unregister(&test_intr_handle,
+            test_interrupt_callback, NULL)) < 0) {
+        printf("rte_intr_callback_unregister() failed with error "
+            "code: %d\n", ret);
+        goto out;
+    }
+
+    ret = -1;
+
     if (flag == 0) {
         printf("registered callback has not been called\n");
         goto out;
     } else if (flag < 0) {
         printf("registered callback failed\n");
         ret = flag;
         goto out;
     }
-    rte_delay_ms(1000);
 
     printf("start register/unregister test\n");
 
@@ -386,18 +403,18 @@ test_interrupt(void)
             "for all\n");
         goto out;
     }
-    rte_delay_ms(1000);
+    rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
 
     printf("start interrupt enable/disable test\n");
 
     /* check interrupt enable/disable functions */
     if (test_interrupt_enable() < 0)
         goto out;
-    rte_delay_ms(1000);
+    rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
 
     if (test_interrupt_disable() < 0)
         goto out;
-    rte_delay_ms(1000);
+    rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
 
     ret = 0;
 
@@ -409,7 +426,7 @@ test_interrupt(void)
     rte_intr_callback_unregister(&test_intr_handle,
             test_interrupt_callback_1, (void *)-1);
 
-    rte_delay_ms(2000);
+    rte_delay_ms(2 * TEST_INTERRUPT_CHECK_INTERVAL);
     /* deinit */
     test_interrupt_deinit();
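The reworked test reflects the new unregister semantics: test_interrupt_callback() now expects rte_intr_callback_unregister() to fail when called from inside the callback itself, because the interrupt thread marks the source active for the duration of the dispatch (the EAL hunks below return -EAGAIN in that case). The fixed 1000/2000 ms waits are also replaced by multiples of TEST_INTERRUPT_CHECK_INTERVAL.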
@@ -105,6 +105,7 @@ struct rte_intr_source {
     TAILQ_ENTRY(rte_intr_source) next;
     struct rte_intr_handle intr_handle;  /**< interrupt handle */
    struct rte_intr_cb_list callbacks;  /**< user callbacks */
+    uint32_t active;
 };
 
 /* global spinlock for interrupt data operation */
@@ -123,9 +124,11 @@ int
 rte_intr_callback_register(struct rte_intr_handle *intr_handle,
             rte_intr_callback_fn cb, void *cb_arg)
 {
-    int ret = -1;
+    int ret, wake_thread;
     struct rte_intr_source *src;
-    int wake_thread = 0;
+    struct rte_intr_callback *callback;
 
+    wake_thread = 0;
+
     /* first do parameter checking */
     if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
@@ -135,8 +138,7 @@ rte_intr_callback_register(struct rte_intr_handle *intr_handle,
     }
 
     /* allocate a new interrupt callback entity */
-    struct rte_intr_callback *callback =
-        rte_zmalloc("interrupt callback list",
+    callback = rte_zmalloc("interrupt callback list",
             sizeof(*callback), 0);
     if (callback == NULL) {
         RTE_LOG(ERR, EAL, "Can not allocate memory\n");
@@ -148,34 +150,37 @@ rte_intr_callback_register(struct rte_intr_handle *intr_handle,
     rte_spinlock_lock(&intr_lock);
 
     /* check if there is at least one callback registered for the fd */
-    TAILQ_FOREACH(src, &intr_sources, next)
-        if (src->intr_handle.fd == intr_handle->fd) {
-            if (src->callbacks.tqh_first == NULL)
-                /* we had no interrupts for this */
-                wake_thread = 1;
+    TAILQ_FOREACH(src, &intr_sources, next) {
+        if (src->intr_handle.fd == intr_handle->fd) {
+            /* we had no interrupts for this */
+            if TAILQ_EMPTY(&src->callbacks)
+                wake_thread = 1;
 
-            TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
-            break;
+            TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
+            ret = 0;
+            break;
         }
+    }
 
-    /* No callback registered for this fd */
-    if (src == NULL){
-        src = rte_zmalloc("interrupt source list", sizeof(*src), 0);
-        if (src == NULL){
+    /* no existing callbacks for this - add new source */
+    if (src == NULL) {
+        if ((src = rte_zmalloc("interrupt source list",
+                sizeof(*src), 0)) == NULL) {
             RTE_LOG(ERR, EAL, "Can not allocate memory\n");
+            rte_free(callback);
             ret = -ENOMEM;
-            goto error;
-        }
-        src->intr_handle = *intr_handle;
-        TAILQ_INIT(&src->callbacks);
-
-        TAILQ_INSERT_TAIL(&intr_sources, src, next);
-        TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
-        wake_thread = 1;
+        } else {
+            src->intr_handle = *intr_handle;
+            TAILQ_INIT(&src->callbacks);
+            TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
+            TAILQ_INSERT_TAIL(&intr_sources, src, next);
+            wake_thread = 1;
+            ret = 0;
+        }
     }
 
     rte_spinlock_unlock(&intr_lock);
 
     /**
      * check if need to notify the pipe fd waited by epoll_wait to
      * rebuild the wait list.
@@ -184,21 +189,16 @@ rte_intr_callback_register(struct rte_intr_handle *intr_handle,
         if (write(intr_pipe.writefd, "1", 1) < 0)
             return -EPIPE;
 
-    return 0;
-
-error:
-    rte_spinlock_unlock(&intr_lock);
-
-    return ret;
+    return (ret);
 }
 
 int
 rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
             rte_intr_callback_fn cb_fn, void *cb_arg)
 {
-    int ret = -1;
+    int ret;
     struct rte_intr_source *src;
-    struct rte_intr_callback *cb;
+    struct rte_intr_callback *cb, *next;
 
     /* do parameter checking first */
     if (intr_handle == NULL || intr_handle->fd < 0) {
@@ -217,39 +217,43 @@ rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
     /* No interrupt source registered for the fd */
     if (src == NULL) {
         ret = -ENOENT;
-        goto error;
-    }
 
-    ret = 0;
-    TAILQ_FOREACH(cb, &src->callbacks, next) {
-        if (cb->cb_fn != cb_fn)
-            continue;
-        if (cb_arg == (void *)-1 || cb->cb_arg == cb_arg) {
-            TAILQ_REMOVE(&src->callbacks, cb, next);
-            rte_free(cb);
-            ret ++;
+    /* interrupt source has some active callbacks right now. */
+    } else if (src->active != 0) {
+        ret = -EAGAIN;
+
+    /* ok to remove. */
+    } else {
+        ret = 0;
+
+        /*walk through the callbacks and remove all that match. */
+        for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
+
+            next = TAILQ_NEXT(cb, next);
+
+            if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
+                    cb->cb_arg == cb_arg)) {
+                TAILQ_REMOVE(&src->callbacks, cb, next);
+                rte_free(cb);
+                ret++;
+            }
         }
-    }
 
-    if (src->callbacks.tqh_first == NULL) {
-        TAILQ_REMOVE(&intr_sources, src, next);
-        rte_free(src);
+        /* all callbacks for that source are removed. */
+        if (TAILQ_EMPTY(&src->callbacks)) {
+            TAILQ_REMOVE(&intr_sources, src, next);
+            rte_free(src);
+        }
     }
 
+    rte_spinlock_unlock(&intr_lock);
+
     /* notify the pipe fd waited by epoll_wait to rebuild the wait list */
-    if (write(intr_pipe.writefd, "1", 1) < 0) {
+    if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
         ret = -EPIPE;
-        goto error;
     }
 
-    rte_spinlock_unlock(&intr_lock);
-
-    return ret;
-
-error:
-    rte_spinlock_unlock(&intr_lock);
-
-    return ret;
+    return (ret);
 }
 
 int
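The pattern above -- an `active` flag checked under the lock, wildcard matching on `cb_arg == (void *)-1`, and a removal loop that saves TAILQ_NEXT before freeing -- is generic. A minimal self-contained sketch of the same idea (plain C with sys/queue.h; the `cb_entry`/`unregister_cb` names are hypothetical, not DPDK API):

    #include <errno.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct cb_entry {
        TAILQ_ENTRY(cb_entry) next;
        void (*fn)(void *);
        void *arg;
    };

    TAILQ_HEAD(cb_list, cb_entry);

    /* Remove every callback matching fn/arg; arg == (void *)-1 is a
     * wildcard. Refuse to touch the list while the dispatcher has marked
     * it active, mirroring rte_intr_callback_unregister()'s -EAGAIN. */
    static int
    unregister_cb(struct cb_list *list, const int *active,
            void (*fn)(void *), void *arg)
    {
        struct cb_entry *cb, *next;
        int ret = 0;

        if (*active != 0)
            return -EAGAIN;

        /* save the successor before freeing so iteration survives removal */
        for (cb = TAILQ_FIRST(list); cb != NULL; cb = next) {
            next = TAILQ_NEXT(cb, next);
            if (cb->fn == fn && (arg == (void *)-1 || cb->arg == arg)) {
                TAILQ_REMOVE(list, cb, next);
                free(cb);
                ret++;
            }
        }
        return ret;  /* number of callbacks removed */
    }

The dispatcher sets the active flag under its lock before invoking callbacks, so a callback that tries to unregister itself gets -EAGAIN instead of freeing the node it is currently running from.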
@@ -319,13 +323,14 @@ rte_intr_disable(struct rte_intr_handle *intr_handle)
 static int
 eal_intr_process_interrupts(struct epoll_event *events, int nfds)
 {
-    int n, i, active_cb, bytes_read;
+    int n, bytes_read;
     struct rte_intr_source *src;
     struct rte_intr_callback *cb;
     union rte_intr_read_buffer buf;
-    struct rte_intr_callback active_cbs[32];
+    struct rte_intr_callback active_cb;
 
     for (n = 0; n < nfds; n++) {
 
         /**
          * if the pipe fd is ready to read, return out to
          * rebuild the wait list.
@@ -346,15 +351,8 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
             continue;
         }
 
-        /* for this source, make a copy of all the callbacks,
-         * then unlock the lock, so the callbacks can
-         * themselves manipulate the list for future
-         * instances.
-         */
-        active_cb = 0;
-        memset(active_cbs, 0, sizeof(active_cbs));
-        TAILQ_FOREACH(cb, &src->callbacks, next)
-            active_cbs[active_cb++] = *cb;
+        /* mark this interrupt source as active and release the lock. */
+        src->active = 1;
         rte_spinlock_unlock(&intr_lock);
 
         /* set the length to be read dor different handle type */
@@ -369,32 +367,47 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
             bytes_read = 1;
             break;
         }
 
         /**
          * read out to clear the ready-to-be-read flag
          * for epoll_wait.
          */
         bytes_read = read(events[n].data.fd, &buf, bytes_read);
 
         if (bytes_read < 0) {
             RTE_LOG(ERR, EAL, "Error reading from file descriptor"
                 " %d, error: %d\n", events[n].data.fd, errno);
-            continue;
         }
         else if (bytes_read == 0) {
             RTE_LOG(ERR, EAL,
                 "Read nothing from file descriptor %d.\n",
                 events[n].data.fd);
-            continue;
         }
-        /**
-         * Finally, call all callbacks from the copy
-         * we made earlier.
-         */
-        for (i = 0; i < active_cb; i++) {
-            if (active_cbs[i].cb_fn == NULL)
-                continue;
-            active_cbs[i].cb_fn(&src->intr_handle,
-                active_cbs[i].cb_arg);
+
+        /* grab a lock, again to call callbacks and update status. */
+        rte_spinlock_lock(&intr_lock);
+
+        if (bytes_read > 0) {
+
+            /* Finally, call all callbacks. */
+            TAILQ_FOREACH(cb, &src->callbacks, next) {
+
+                /* make a copy and unlock. */
+                active_cb = *cb;
+                rte_spinlock_unlock(&intr_lock);
+
+                /* call the actual callback */
+                active_cb.cb_fn(&src->intr_handle,
+                    active_cb.cb_arg);
+
+                /*get the lcok back. */
+                rte_spinlock_lock(&intr_lock);
+            }
         }
+
+        /* we done with that interrupt source, release it. */
+        src->active = 0;
+        rte_spinlock_unlock(&intr_lock);
     }
 
     return 0;
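The fixed-size active_cbs[32] snapshot is gone; instead each callback is copied one at a time and the lock is released only around the actual call. A runnable sketch of that copy-then-unlock loop, using a pthread mutex in place of the EAL spinlock (the names here are illustrative, not DPDK API):

    #include <pthread.h>
    #include <sys/queue.h>

    struct cb_entry {
        TAILQ_ENTRY(cb_entry) next;
        void (*fn)(void *);
        void *arg;
    };

    TAILQ_HEAD(cb_list, cb_entry);

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Dispatch all callbacks for one source. The list is only walked and
     * modified under the lock; copying one entry lets the callback body
     * run unlocked (it may even register new callbacks meanwhile). */
    static void
    dispatch(struct cb_list *list, int *active)
    {
        struct cb_entry *cb, copy;

        pthread_mutex_lock(&lock);
        *active = 1;                   /* blocks unregister while running */
        TAILQ_FOREACH(cb, list, next) {
            copy = *cb;                /* snapshot one entry */
            pthread_mutex_unlock(&lock);
            copy.fn(copy.arg);         /* run without holding the lock */
            pthread_mutex_lock(&lock); /* re-take before touching list */
        }
        *active = 0;                   /* source released */
        pthread_mutex_unlock(&lock);
    }

Resuming the walk from `cb` after re-locking is safe only because `*active` keeps unregister from freeing nodes between the unlock and the relock -- exactly the invariant the new src->active field provides.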
@@ -124,6 +124,7 @@ struct rte_eth_dev_callback {
     rte_eth_dev_cb_fn cb_fn;  /**< Callback address */
     void *cb_arg;  /**< Parameter for callback */
     enum rte_eth_event_type event;  /**< Interrupt event type */
+    uint32_t active;  /**< Callback is executing */
 };
 
 enum {
@@ -1575,40 +1576,38 @@ rte_eth_dev_callback_register(uint8_t port_id,
             enum rte_eth_event_type event,
             rte_eth_dev_cb_fn cb_fn, void *cb_arg)
 {
-    int ret = -1;
     struct rte_eth_dev *dev;
-    struct rte_eth_dev_callback *user_cb = NULL;
+    struct rte_eth_dev_callback *user_cb;
 
     if (!cb_fn)
-        return -1;
+        return (-EINVAL);
     if (port_id >= nb_ports) {
         PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
-        return -1;
+        return (-EINVAL);
     }
 
     dev = &rte_eth_devices[port_id];
     rte_spinlock_lock(&rte_eth_dev_cb_lock);
 
     TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
         if (user_cb->cb_fn == cb_fn &&
             user_cb->cb_arg == cb_arg &&
             user_cb->event == event) {
-            ret = 0;
-            goto out;
+            break;
         }
     }
-    user_cb = rte_malloc("INTR_USER_CALLBACK",
-            sizeof(struct rte_eth_dev_callback), 0);
-    if (!user_cb)
-        goto out;
-    user_cb->cb_fn = cb_fn;
-    user_cb->cb_arg = cb_arg;
-    user_cb->event = event;
-    TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
-    ret = 0;
 
-out:
+    /* create a new callback. */
+    if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
+            sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
+        user_cb->cb_fn = cb_fn;
+        user_cb->cb_arg = cb_arg;
+        user_cb->event = event;
+        TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
+    }
+
     rte_spinlock_unlock(&rte_eth_dev_cb_lock);
 
-    return ret;
+    return ((user_cb == NULL) ? -ENOMEM : 0);
 }
 
 int
@@ -1616,38 +1615,51 @@ rte_eth_dev_callback_unregister(uint8_t port_id,
             enum rte_eth_event_type event,
             rte_eth_dev_cb_fn cb_fn, void *cb_arg)
 {
-    int ret = -1;
+    int ret;
     struct rte_eth_dev *dev;
-    struct rte_eth_dev_callback *cb_lst = NULL;
+    struct rte_eth_dev_callback *cb, *next;
 
     if (!cb_fn)
-        return -1;
+        return (-EINVAL);
     if (port_id >= nb_ports) {
         PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
-        return -1;
+        return (-EINVAL);
     }
 
     dev = &rte_eth_devices[port_id];
     rte_spinlock_lock(&rte_eth_dev_cb_lock);
-    TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
-        if (cb_lst->cb_fn != cb_fn || cb_lst->event != event)
+
+    ret = 0;
+    for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {
+
+        next = TAILQ_NEXT(cb, next);
+
+        if (cb->cb_fn != cb_fn || cb->event != event ||
+                (cb->cb_arg != (void *)-1 &&
+                cb->cb_arg != cb_arg))
             continue;
-        if (cb_lst->cb_arg == (void *)-1 ||
-                cb_lst->cb_arg == cb_arg) {
-            TAILQ_REMOVE(&(dev->callbacks), cb_lst, next);
-            rte_free(cb_lst);
-            ret = 0;
+
+        /*
+         * if this callback is not executing right now,
+         * then remove it.
+         */
+        if (cb->active == 0) {
+            TAILQ_REMOVE(&(dev->callbacks), cb, next);
+            rte_free(cb);
+        } else {
+            ret = -EAGAIN;
         }
     }
 
     rte_spinlock_unlock(&rte_eth_dev_cb_lock);
 
-    return ret;
+    return (ret);
 }
 
 void
-_rte_eth_dev_callback_process(struct rte_eth_dev *dev, enum rte_eth_event_type event)
+_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+    enum rte_eth_event_type event)
 {
-    struct rte_eth_dev_callback *cb_lst = NULL;
+    struct rte_eth_dev_callback *cb_lst;
     struct rte_eth_dev_callback dev_cb;
 
     rte_spinlock_lock(&rte_eth_dev_cb_lock);
@@ -1655,11 +1667,12 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev, enum rte_eth_event_type e
         if (cb_lst->cb_fn == NULL || cb_lst->event != event)
             continue;
         dev_cb = *cb_lst;
+        cb_lst->active = 1;
         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
         dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
                 dev_cb.cb_arg);
         rte_spinlock_lock(&rte_eth_dev_cb_lock);
+        cb_lst->active = 0;
     }
     rte_spinlock_unlock(&rte_eth_dev_cb_lock);
 }
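For applications, the visible changes are the return codes and the wildcard: duplicate registrations are now silently accepted, failures come back as -EINVAL/-ENOMEM instead of -1, and passing (void *)-1 as cb_arg to unregister matches every argument. A hedged usage sketch (the on_lsc handler and setup_lsc wrapper are illustrative):

    #include <rte_ethdev.h>

    static void
    on_lsc(uint8_t port_id, enum rte_eth_event_type event, void *arg)
    {
        /* called from the interrupt thread; keep it short */
        (void)port_id; (void)event; (void)arg;
    }

    static int
    setup_lsc(uint8_t port_id)
    {
        int ret;

        ret = rte_eth_dev_callback_register(port_id,
                RTE_ETH_EVENT_INTR_LSC, on_lsc, NULL);
        if (ret != 0)
            return ret;        /* -EINVAL or -ENOMEM */

        /* ... later: remove this handler for every cb_arg value */
        ret = rte_eth_dev_callback_unregister(port_id,
                RTE_ETH_EVENT_INTR_LSC, on_lsc, (void *)-1);
        return ret;            /* -EAGAIN if on_lsc is mid-flight */
    }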
@@ -55,6 +55,7 @@
 /* structure for interrupt relative data */
 struct e1000_interrupt {
     uint32_t flags;
+    uint32_t mask;
 };
 
 /* local vfta copy */
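The new mask field changes how the PMDs program the hardware: feature-specific setup functions only OR their cause bits into intr->mask, and a single *_intr_enable() writes the accumulated mask to the interrupt-mask-set register. A minimal sketch of the pattern (the bit values and names are placeholders for E1000_IMS/IXGBE_EIMS-style write-to-enable registers):

    #include <stdint.h>

    #define CAUSE_LSC   (1u << 2)   /* link status change, placeholder bit */

    struct intr_state {
        uint32_t flags;
        uint32_t mask;              /* causes the driver wants enabled */
    };

    /* setup functions accumulate bits instead of touching hardware */
    static void
    lsc_interrupt_setup(struct intr_state *intr)
    {
        intr->mask |= CAUSE_LSC;
    }

    /* one place writes the whole mask, e.g. after a hardware reset */
    static void
    intr_enable(struct intr_state *intr, volatile uint32_t *ims_reg)
    {
        *ims_reg = intr->mask;      /* write-1-to-enable semantics assumed */
    }

This is why eth_igb_start() below can simply call igb_intr_enable(dev) after reset: whatever eth_igb_lsc_interrupt_setup() put in intr->mask is re-armed in one register write.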
@@ -74,7 +74,7 @@ static void eth_igb_infos_get(struct rte_eth_dev *dev,
     struct rte_eth_dev_info *dev_info);
 static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
     struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
 static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
 static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
 static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
@@ -258,6 +258,25 @@ rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
     return 0;
 }
 
+static inline void
+igb_intr_enable(struct rte_eth_dev *dev)
+{
+    struct e1000_interrupt *intr =
+        E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+    struct e1000_hw *hw =
+        E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+    E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
+    E1000_WRITE_FLUSH(hw);
+}
+
+static void
+igb_intr_disable(struct e1000_hw *hw)
+{
+    E1000_WRITE_REG(hw, E1000_IMC, ~0);
+    E1000_WRITE_FLUSH(hw);
+}
+
 static void
 igb_identify_hardware(struct rte_eth_dev *dev)
 {
@@ -387,6 +406,12 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
     rte_intr_callback_register(&(pci_dev->intr_handle),
         eth_igb_interrupt_handler, (void *)eth_dev);
 
+    /* enable uio intr after callback register */
+    rte_intr_enable(&(pci_dev->intr_handle));
+
+    /* enable support intr */
+    igb_intr_enable(eth_dev);
+
     return 0;
 
 err_late:
@@ -523,8 +548,6 @@ eth_igb_start(struct rte_eth_dev *dev)
 
     PMD_INIT_LOG(DEBUG, ">>");
 
-    igb_intr_disable(hw);
-
     /* Power up the phy. Needed to make the link go Up */
     e1000_power_up_phy(hw);
 
@@ -649,14 +672,11 @@ eth_igb_start(struct rte_eth_dev *dev)
     e1000_setup_link(hw);
 
     /* check if lsc interrupt feature is enabled */
-    if (dev->data->dev_conf.intr_conf.lsc != 0) {
-        ret = eth_igb_interrupt_setup(dev);
-        if (ret) {
-            PMD_INIT_LOG(ERR, "Unable to setup interrupts");
-            igb_dev_clear_queues(dev);
-            return ret;
-        }
-    }
+    if (dev->data->dev_conf.intr_conf.lsc != 0)
+        ret = eth_igb_lsc_interrupt_setup(dev);
+
+    /* resume enabled intr since hw reset */
+    igb_intr_enable(dev);
 
     PMD_INIT_LOG(DEBUG, "<<");
 
@@ -1408,12 +1428,6 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
     }
 }
 
-static void
-igb_intr_disable(struct e1000_hw *hw)
-{
-    E1000_WRITE_REG(hw, E1000_IMC, ~0);
-    E1000_WRITE_FLUSH(hw);
-}
 
 /**
  * It enables the interrupt mask and then enable the interrupt.
@@ -1426,14 +1440,12 @@ igb_intr_disable(struct e1000_hw *hw)
  * - On failure, a negative value.
  */
 static int
-eth_igb_interrupt_setup(struct rte_eth_dev *dev)
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
 {
-    struct e1000_hw *hw =
-        E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    struct e1000_interrupt *intr =
+        E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-    E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC);
-    E1000_WRITE_FLUSH(hw);
-    rte_intr_enable(&(dev->pci_dev->intr_handle));
+    intr->mask |= E1000_ICR_LSC;
 
     return 0;
 }
@@ -1458,8 +1470,12 @@ eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
     struct e1000_interrupt *intr =
         E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
+    igb_intr_disable(hw);
+
     /* read-on-clear nic registers here */
     icr = E1000_READ_REG(hw, E1000_ICR);
 
+    intr->flags = 0;
     if (icr & E1000_ICR_LSC) {
         intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
     }
@@ -1488,51 +1504,54 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
     struct rte_eth_link link;
     int ret;
 
-    if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
-        return -1;
-
-    intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+    igb_intr_enable(dev);
     rte_intr_enable(&(dev->pci_dev->intr_handle));
 
-    /* set get_link_status to check register later */
-    hw->mac.get_link_status = 1;
-    ret = eth_igb_link_update(dev, 0);
+    if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
+        intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
 
-    /* check if link has changed */
-    if (ret < 0)
-        return 0;
+        /* set get_link_status to check register later */
+        hw->mac.get_link_status = 1;
+        ret = eth_igb_link_update(dev, 0);
 
-    memset(&link, 0, sizeof(link));
-    rte_igb_dev_atomic_read_link_status(dev, &link);
-    if (link.link_status) {
-        PMD_INIT_LOG(INFO,
-            " Port %d: Link Up - speed %u Mbps - %s\n",
-            dev->data->port_id, (unsigned)link.link_speed,
-            link.link_duplex == ETH_LINK_FULL_DUPLEX ?
-                "full-duplex" : "half-duplex");
-    } else {
-        PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
-                dev->data->port_id);
+        /* check if link has changed */
+        if (ret < 0)
+            return 0;
+
+        memset(&link, 0, sizeof(link));
+        rte_igb_dev_atomic_read_link_status(dev, &link);
+        if (link.link_status) {
+            PMD_INIT_LOG(INFO,
+                " Port %d: Link Up - speed %u Mbps - %s\n",
+                dev->data->port_id, (unsigned)link.link_speed,
+                link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+                    "full-duplex" : "half-duplex");
+        } else {
+            PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
+                    dev->data->port_id);
+        }
+        PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+                dev->pci_dev->addr.domain,
+                dev->pci_dev->addr.bus,
+                dev->pci_dev->addr.devid,
+                dev->pci_dev->addr.function);
+        tctl = E1000_READ_REG(hw, E1000_TCTL);
+        rctl = E1000_READ_REG(hw, E1000_RCTL);
+        if (link.link_status) {
+            /* enable Tx/Rx */
+            tctl |= E1000_TCTL_EN;
+            rctl |= E1000_RCTL_EN;
+        } else {
+            /* disable Tx/Rx */
+            tctl &= ~E1000_TCTL_EN;
+            rctl &= ~E1000_RCTL_EN;
+        }
+        E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+        E1000_WRITE_FLUSH(hw);
+        _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
     }
-    PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
-            dev->pci_dev->addr.domain,
-            dev->pci_dev->addr.bus,
-            dev->pci_dev->addr.devid,
-            dev->pci_dev->addr.function);
-    tctl = E1000_READ_REG(hw, E1000_TCTL);
-    rctl = E1000_READ_REG(hw, E1000_RCTL);
-    if (link.link_status) {
-        /* enable Tx/Rx */
-        tctl |= E1000_TCTL_EN;
-        rctl |= E1000_RCTL_EN;
-    } else {
-        /* disable Tx/Rx */
-        tctl &= ~E1000_TCTL_EN;
-        rctl &= ~E1000_RCTL_EN;
-    }
-    E1000_WRITE_REG(hw, E1000_TCTL, tctl);
-    E1000_WRITE_REG(hw, E1000_RCTL, rctl);
-    E1000_WRITE_FLUSH(hw);
 
     return 0;
 }
@@ -1556,7 +1575,6 @@ eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 
     eth_igb_interrupt_get_status(dev);
     eth_igb_interrupt_action(dev);
-    _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
 }
 
 static int
@@ -1648,7 +1666,7 @@ igbvf_intr_disable(struct e1000_hw *hw)
     PMD_INIT_LOG(DEBUG, "igbvf_intr_disable");
 
     /* Clear interrupt mask to stop from interrupts being generated */
-    E1000_WRITE_REG(hw, E1000_EIMC, ~0);
+    E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
 
     E1000_WRITE_FLUSH(hw);
 }
@@ -1665,7 +1683,7 @@ igbvf_stop_adapter(struct rte_eth_dev *dev)
     eth_igb_infos_get(dev, &dev_info);
 
     /* Clear interrupt mask to stop from interrupts being generated */
-    E1000_WRITE_REG(hw, E1000_EIMC, ~0);
+    igbvf_intr_disable(hw);
 
     /* Clear any pending interrupts, flush previous writes */
     E1000_READ_REG(hw, E1000_EICR);
@@ -1752,10 +1770,14 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 static int
 igbvf_dev_start(struct rte_eth_dev *dev)
 {
     struct e1000_hw *hw =
         E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    int ret;
 
     PMD_INIT_LOG(DEBUG, "igbvf_dev_start");
 
+    hw->mac.ops.reset_hw(hw);
+
     /* Set all vfta */
     igbvf_set_vfta_all(dev,1);
 
@@ -132,7 +132,7 @@ static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
         struct rte_eth_pfc_conf *pfc_conf);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
@@ -354,6 +354,18 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
     }
 }
 
+static inline void
+ixgbe_enable_intr(struct rte_eth_dev *dev)
+{
+    struct ixgbe_interrupt *intr =
+        IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+    IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
+    IXGBE_WRITE_FLUSH(hw);
+}
+
 /*
  * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
  */
@@ -577,8 +589,6 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
     }
     hw->fc.send_xon = 1;
 
-    ixgbe_disable_intr(hw);
-
     /* Make sure we have a good EEPROM before we read from it */
     diag = ixgbe_validate_eeprom_checksum(hw, &csum);
     if (diag != IXGBE_SUCCESS) {
@@ -616,6 +626,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
         return -EIO;
     }
 
+    /* disable interrupt */
+    ixgbe_disable_intr(hw);
+
     /* pick up the PCI bus settings for reporting later */
     ixgbe_get_bus_info(hw);
 
@@ -662,6 +675,12 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
     rte_intr_callback_register(&(pci_dev->intr_handle),
         ixgbe_dev_interrupt_handler, (void *)eth_dev);
 
+    /* enable uio intr after callback register */
+    rte_intr_enable(&(pci_dev->intr_handle));
+
+    /* enable support intr */
+    ixgbe_enable_intr(eth_dev);
+
     return 0;
 }
 
@@ -1117,10 +1136,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
     /* reinitialize adapter
      * this calls reset and start */
     ixgbe_init_hw(hw);
+    hw->mac.ops.start_hw(hw);
 
     /* initialize transmission unit */
     ixgbe_dev_tx_init(dev);
 
     /* This can fail when allocating mbufs for descriptor rings */
     err = ixgbe_dev_rx_init(dev);
     if (err) {
@@ -1177,11 +1197,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
         goto error;
 
     /* check if lsc interrupt is enabled */
-    if (dev->data->dev_conf.intr_conf.lsc != 0) {
-        err = ixgbe_dev_interrupt_setup(dev);
-        if (err)
-            goto error;
-    }
+    if (dev->data->dev_conf.intr_conf.lsc != 0)
+        ixgbe_dev_lsc_interrupt_setup(dev);
+
+    /* resume enabled intr since hw reset */
+    ixgbe_enable_intr(dev);
 
     mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
         ETH_VLAN_EXTEND_MASK;
@@ -1644,14 +1664,13 @@ ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
  * - On failure, a negative value.
  */
 static int
-ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev)
+ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
 {
-    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    struct ixgbe_interrupt *intr =
+        IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-    ixgbe_dev_link_status_print(dev);
-    IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
-    IXGBE_WRITE_FLUSH(hw);
-    rte_intr_enable(&(dev->pci_dev->intr_handle));
+    intr->mask |= IXGBE_EICR_LSC;
 
     return 0;
 }
@@ -1674,12 +1693,14 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
     struct ixgbe_interrupt *intr =
         IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-    IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_LSC);
-    IXGBE_WRITE_FLUSH(hw);
+    /* clear all cause mask */
+    ixgbe_disable_intr(hw);
 
     /* read-on-clear nic registers here */
     eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
-    PMD_INIT_LOG(INFO, "eicr %x", eicr);
+    PMD_DRV_LOG(INFO, "eicr %x", eicr);
 
+    intr->flags = 0;
     if (eicr & IXGBE_EICR_LSC) {
         /* set flag for async link update */
         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -1737,11 +1758,43 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
 {
     struct ixgbe_interrupt *intr =
         IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+    int64_t timeout;
+    struct rte_eth_link link;
+    int intr_enable_delay = false;
 
-    if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
-        return -1;
+    PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+
+    if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+        /* get the link status before link update, for predicting later */
+        memset(&link, 0, sizeof(link));
+        rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+
+        ixgbe_dev_link_update(dev, 0);
+
+        /* likely to up */
+        if (!link.link_status)
+            /* handle it 1 sec later, wait it being stable */
+            timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
+        /* likely to down */
+        else
+            /* handle it 4 sec later, wait it being stable */
+            timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
+
+        ixgbe_dev_link_status_print(dev);
+
+        intr_enable_delay = true;
     }
 
-    ixgbe_dev_link_update(dev, 0);
+    if (intr_enable_delay) {
+        if (rte_eal_alarm_set(timeout * 1000,
+                ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+            PMD_DRV_LOG(ERR, "Error setting alarm");
+    } else {
+        PMD_DRV_LOG(DEBUG, "enable intr immediately");
+        ixgbe_enable_intr(dev);
+        rte_intr_enable(&(dev->pci_dev->intr_handle));
+    }
 
     return 0;
 }
@@ -1766,19 +1819,17 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
     struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
     struct ixgbe_interrupt *intr =
         IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-    struct ixgbe_hw *hw =
-        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-    IXGBE_READ_REG(hw, IXGBE_EICR);
-    ixgbe_dev_interrupt_action(dev);
     if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
         ixgbe_dev_link_update(dev, 0);
         intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
-        rte_intr_enable(&(dev->pci_dev->intr_handle));
-        IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
-        IXGBE_WRITE_FLUSH(hw);
         ixgbe_dev_link_status_print(dev);
         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
     }
 
+    PMD_DRV_LOG(DEBUG, "enable intr in delayed handler\n");
+    ixgbe_enable_intr(dev);
+    rte_intr_enable(&(dev->pci_dev->intr_handle));
 }
 
 /**
@@ -1797,34 +1848,9 @@ static void
 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
     void *param)
 {
-    int64_t timeout;
-    struct rte_eth_link link;
     struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-    struct ixgbe_interrupt *intr =
-        IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-    /* get the link status before link update, for predicting later */
-    memset(&link, 0, sizeof(link));
-    rte_ixgbe_dev_atomic_read_link_status(dev, &link);
-
-    if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
-        return;
-
-    /* likely to up */
-    if (!link.link_status)
-        /* handle it 1 sec later, wait it being stable */
-        timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
-    /* likely to down */
-    else
-        /* handle it 4 sec later, wait it being stable */
-        timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
-
-    ixgbe_dev_link_status_print(dev);
-    if (rte_eal_alarm_set(timeout * 1000,
-            ixgbe_dev_interrupt_delayed_handler, param) < 0)
-        PMD_INIT_LOG(ERR, "Error setting alarm");
+    ixgbe_dev_interrupt_get_status(dev);
+    ixgbe_dev_interrupt_action(dev);
 }
 
 static int
@@ -2160,10 +2186,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 static int
 ixgbevf_dev_start(struct rte_eth_dev *dev)
 {
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
     int err, mask = 0;
 
     PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
 
+    hw->mac.ops.reset_hw(hw);
+
     ixgbevf_dev_tx_init(dev);
 
     /* This can fail when allocating mbufs for descriptor rings */
@@ -79,6 +79,7 @@ struct ixgbe_hw_fdir_info {
 /* structure for interrupt relative data */
 struct ixgbe_interrupt {
     uint32_t flags;
+    uint32_t mask;
 };
 
 struct ixgbe_stat_mapping_registers {
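The ixgbe link-state handling now lives in ixgbe_dev_interrupt_action(): instead of re-arming the interrupt immediately, it schedules ixgbe_dev_interrupt_delayed_handler() with rte_eal_alarm_set() (1 s when the link is probably coming up, 4 s when it is probably going down) and re-enables interrupts only from that delayed context once the link has settled. A hedged sketch of the same debounce idea (schedule_alarm mimics rte_eal_alarm_set(us, cb, arg); all names here are placeholders):

    #include <stdbool.h>
    #include <stdint.h>

    /* assumed: schedules cb(arg) once, us microseconds from now */
    extern int schedule_alarm(uint64_t us, void (*cb)(void *), void *arg);

    #define LINK_UP_CHECK_TIMEOUT_MS   1000  /* wait 1 s for link-up */
    #define LINK_DOWN_CHECK_TIMEOUT_MS 4000  /* wait 4 s for link-down */

    struct port;                  /* opaque device, illustrative */
    extern bool port_link_is_up(struct port *p);
    extern void port_irq_enable(struct port *p);
    extern void port_notify_lsc(struct port *p);

    static void
    delayed_lsc_handler(void *arg)
    {
        struct port *p = arg;

        port_notify_lsc(p);       /* report the settled link state */
        port_irq_enable(p);       /* re-arm only after the debounce */
    }

    /* interrupt context: defer both the notification and the re-enable */
    static void
    on_lsc_interrupt(struct port *p)
    {
        uint64_t ms = port_link_is_up(p) ?
            LINK_DOWN_CHECK_TIMEOUT_MS :  /* up now, likely going down */
            LINK_UP_CHECK_TIMEOUT_MS;     /* down now, likely coming up */

        schedule_alarm(ms * 1000, delayed_lsc_handler, p);
    }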