pollers: Fix pollers to return correct busy status

A poller should return a status > 0 when it did some work
(i.e. used CPU time for a while), marking that call as busy
CPU time.

Active pollers should return the BUSY status only if they did
meaningful work beyond checking some conditions (e.g. processing
requests or performing some complicated operation).

Signed-off-by: Maciej Szwed <maciej.szwed@intel.com>
Change-Id: Id4636a0997489b129cecfe785592cc97b50992ba
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2164
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Maciej Szwed 2020-05-04 11:51:27 +02:00 committed by Tomasz Zawadzki
parent 058be487c3
commit eb05cbd677
40 changed files with 143 additions and 122 deletions
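
For illustration only (not part of this commit): under the new convention a
poller first does its work, then reports whether that iteration was busy or
idle. The sketch below uses a hypothetical example_ctx with a placeholder
"pending" counter; SPDK_POLLER_IDLE and SPDK_POLLER_BUSY are the enum values
added by this change (implicitly 0 and 1, so pollers that already return 0 or
a positive count keep their meaning).

#include "spdk/thread.h"

struct example_ctx {
	struct spdk_poller *poller;
	int pending;            /* placeholder unit of queued work */
};

/* Hypothetical poller, for illustration only. */
static int
example_poller(void *arg)
{
	struct example_ctx *ctx = arg;
	int did_work = 0;

	while (ctx->pending > 0) {
		ctx->pending--;  /* stand-in for real request processing */
		did_work = 1;
	}

	/* Merely checking the queue does not count as busy work. */
	return did_work ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

Such a poller would be registered as usual, e.g.
ctx->poller = SPDK_POLLER_REGISTER(example_poller, ctx, 0);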

View File

@ -207,7 +207,7 @@ void spdk_ioat_flush(struct spdk_ioat_chan *chan);
*
* \param chan I/OAT channel to check for completions.
*
* \return 0 on success, negative errno on failure.
* \return number of events handled on success, negative errno on failure.
*/
int spdk_ioat_process_events(struct spdk_ioat_chan *chan);
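
Callers that drive this function from a poller can now map the completion
count onto the busy/idle convention described above. A minimal sketch,
mirroring the ioat poller change made elsewhere in this commit
(ioat_poll_example is an illustrative name, not part of the change):

static int
ioat_poll_example(void *arg)
{
	struct spdk_ioat_chan *chan = arg;

	/* Any handled event (or an error) marks the iteration as busy. */
	return spdk_ioat_process_events(chan) != 0 ? SPDK_POLLER_BUSY :
	       SPDK_POLLER_IDLE;
}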

View File

@ -47,6 +47,11 @@
extern "C" {
#endif
enum spdk_thread_poller_rc {
SPDK_POLLER_IDLE,
SPDK_POLLER_BUSY,
};
/**
* A stackless, lightweight thread.
*/

View File

@ -2265,7 +2265,7 @@ bdev_channel_poll_qos(void *arg)
* timeslice has actually expired. This should never happen
* with a well-behaved timer implementation.
*/
return 0;
return SPDK_POLLER_IDLE;
}
/* Reset for next round of rate limiting */
@ -2457,7 +2457,7 @@ bdev_poll_timeout_io(void *arg)
ctx = calloc(1, sizeof(struct poll_timeout_ctx));
if (!ctx) {
SPDK_ERRLOG("failed to allocate memory\n");
return 1;
return SPDK_POLLER_BUSY;
}
ctx->desc = desc;
ctx->cb_arg = desc->cb_arg;
@ -2476,7 +2476,7 @@ bdev_poll_timeout_io(void *arg)
ctx,
bdev_channel_poll_timeout_io_done);
return 1;
return SPDK_POLLER_BUSY;
}
int
@ -3114,7 +3114,7 @@ bdev_calculate_measured_queue_depth(void *ctx)
bdev->internal.temporary_queue_depth = 0;
spdk_for_each_channel(__bdev_to_io_dev(bdev), _calculate_measured_qd, bdev,
_calculate_measured_qd_cpl);
return 0;
return SPDK_POLLER_BUSY;
}
void
@ -6458,12 +6458,12 @@ bdev_lock_lba_range_check_io(void *_i)
TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
if (bdev_io_range_is_locked(bdev_io, range)) {
ctx->poller = SPDK_POLLER_REGISTER(bdev_lock_lba_range_check_io, i, 100);
return 1;
return SPDK_POLLER_BUSY;
}
}
spdk_for_each_channel_continue(i, 0);
return 1;
return SPDK_POLLER_BUSY;
}
static void

View File

@ -2088,7 +2088,7 @@ _blobfs_cache_pool_reclaim(void *arg)
int rc;
if (!blobfs_cache_pool_need_reclaim()) {
return 0;
return SPDK_POLLER_IDLE;
}
TAILQ_FOREACH_SAFE(file, &g_caches, cache_tailq, tmp) {
@ -2099,7 +2099,7 @@ _blobfs_cache_pool_reclaim(void *arg)
continue;
}
if (!blobfs_cache_pool_need_reclaim()) {
return 1;
return SPDK_POLLER_BUSY;
}
break;
}
@ -2112,7 +2112,7 @@ _blobfs_cache_pool_reclaim(void *arg)
continue;
}
if (!blobfs_cache_pool_need_reclaim()) {
return 1;
return SPDK_POLLER_BUSY;
}
break;
}
@ -2126,7 +2126,7 @@ _blobfs_cache_pool_reclaim(void *arg)
break;
}
return 1;
return SPDK_POLLER_BUSY;
}
static void

View File

@ -1116,7 +1116,7 @@ rpc_subsystem_init_poller_ctx(void *ctx)
free(poller_ctx);
}
return 1;
return SPDK_POLLER_BUSY;
}
static void

View File

@ -198,10 +198,10 @@ rpc_client_poller(void *arg)
if (rc == 0) {
/* No response yet */
return -1;
return SPDK_POLLER_BUSY;
} else if (rc < 0) {
app_json_config_load_done(ctx, rc);
return -1;
return SPDK_POLLER_BUSY;
}
resp = spdk_jsonrpc_client_get_response(ctx->client_conn);
@ -235,7 +235,7 @@ rpc_client_poller(void *arg)
}
return -1;
return SPDK_POLLER_BUSY;
}
static int
@ -255,9 +255,11 @@ rpc_client_connect_poller(void *_ctx)
if (rc) {
app_json_config_load_done(ctx, rc);
}
return SPDK_POLLER_IDLE;
}
return -1;
return SPDK_POLLER_BUSY;
}
static int

View File

@ -49,7 +49,7 @@ static int
rpc_subsystem_poll(void *arg)
{
spdk_rpc_accept();
return -1;
return SPDK_POLLER_BUSY;
}
void

View File

@ -2410,7 +2410,7 @@ ftl_io_channel_poll(void *arg)
TAILQ_HEAD(, ftl_io) retry_queue;
if (TAILQ_EMPTY(&ch->write_cmpl_queue) && TAILQ_EMPTY(&ch->retry_queue)) {
return 0;
return SPDK_POLLER_IDLE;
}
while (!TAILQ_EMPTY(&ch->write_cmpl_queue)) {
@ -2436,7 +2436,7 @@ ftl_io_channel_poll(void *arg)
}
}
return 1;
return SPDK_POLLER_BUSY;
}
int
@ -2447,14 +2447,14 @@ ftl_task_core(void *ctx)
if (dev->halt) {
if (ftl_shutdown_complete(dev)) {
spdk_poller_unregister(&dev->core_poller);
return 0;
return SPDK_POLLER_IDLE;
}
}
ftl_process_writes(dev);
ftl_process_relocs(dev);
return 0;
return SPDK_POLLER_BUSY;
}
SPDK_LOG_REGISTER_COMPONENT("ftl_core", SPDK_LOG_FTL_CORE)

View File

@ -1632,7 +1632,7 @@ ftl_halt_poller(void *ctx)
}
}
return 0;
return SPDK_POLLER_BUSY;
}
static void

View File

@ -318,7 +318,7 @@ static int
ioat_process_channel_events(struct spdk_ioat_chan *ioat)
{
struct ioat_descriptor *desc;
uint64_t status, completed_descriptor, hw_desc_phys_addr;
uint64_t status, completed_descriptor, hw_desc_phys_addr, events_count = 0;
uint32_t tail;
if (ioat->head == ioat->tail) {
@ -347,10 +347,12 @@ ioat_process_channel_events(struct spdk_ioat_chan *ioat)
hw_desc_phys_addr = desc->phys_addr;
ioat->tail++;
events_count++;
} while (hw_desc_phys_addr != completed_descriptor);
ioat->last_seen = hw_desc_phys_addr;
return 0;
return events_count;
}
static void

View File

@ -512,10 +512,10 @@ iscsi_conn_remove_lun(void *ctx)
int lun_id = spdk_scsi_lun_get_id(lun);
if (!iscsi_conn_check_tasks_for_lun(conn, lun)) {
return -1;
return SPDK_POLLER_BUSY;
}
iscsi_conn_close_lun(conn, lun_id);
return -1;
return SPDK_POLLER_BUSY;
}
static void
@ -648,7 +648,7 @@ _iscsi_conn_check_shutdown(void *arg)
rc = iscsi_conn_free_tasks(conn);
if (rc < 0) {
return 1;
return SPDK_POLLER_BUSY;
}
spdk_poller_unregister(&conn->shutdown_timer);
@ -656,7 +656,7 @@ _iscsi_conn_check_shutdown(void *arg)
iscsi_conn_stop(conn);
iscsi_conn_free(conn);
return 1;
return SPDK_POLLER_BUSY;
}
static void
@ -688,14 +688,14 @@ _iscsi_conn_check_pending_tasks(void *arg)
if (conn->dev != NULL &&
spdk_scsi_dev_has_pending_tasks(conn->dev, conn->initiator_port)) {
return 1;
return SPDK_POLLER_BUSY;
}
spdk_poller_unregister(&conn->shutdown_timer);
_iscsi_conn_destruct(conn);
return 1;
return SPDK_POLLER_BUSY;
}
void
@ -783,14 +783,14 @@ static int
iscsi_conn_check_shutdown(void *arg)
{
if (iscsi_get_active_conns(NULL) != 0) {
return 1;
return SPDK_POLLER_BUSY;
}
spdk_poller_unregister(&g_shutdown_timer);
spdk_thread_send_msg(spdk_get_thread(), iscsi_conn_check_shutdown_cb, NULL);
return 1;
return SPDK_POLLER_BUSY;
}
static void
@ -826,7 +826,7 @@ logout_request_timeout(void *arg)
conn->state = ISCSI_CONN_STATE_EXITING;
}
return -1;
return SPDK_POLLER_BUSY;
}
/* If the connection is running and logout is not requested yet, request logout
@ -1647,7 +1647,7 @@ logout_timeout(void *arg)
conn->state = ISCSI_CONN_STATE_EXITING;
}
return -1;
return SPDK_POLLER_BUSY;
}
void

View File

@ -3589,12 +3589,12 @@ _iscsi_op_abort_task(void *arg)
rc = iscsi_conn_abort_queued_datain_task(task->conn, task->scsi.abort_id);
if (rc != 0) {
return 1;
return SPDK_POLLER_BUSY;
}
spdk_poller_unregister(&task->mgmt_poller);
iscsi_queue_mgmt_task(task->conn, task);
return 1;
return SPDK_POLLER_BUSY;
}
static void
@ -3614,12 +3614,12 @@ _iscsi_op_abort_task_set(void *arg)
rc = iscsi_conn_abort_queued_datain_tasks(task->conn, task->scsi.lun,
task->pdu);
if (rc != 0) {
return 1;
return SPDK_POLLER_BUSY;
}
spdk_poller_unregister(&task->mgmt_poller);
iscsi_queue_mgmt_task(task->conn, task);
return 1;
return SPDK_POLLER_BUSY;
}
void

View File

@ -1189,7 +1189,7 @@ iscsi_poll_group_poll(void *ctx)
int rc;
if (spdk_unlikely(STAILQ_EMPTY(&group->connections))) {
return 0;
return SPDK_POLLER_IDLE;
}
rc = spdk_sock_group_poll(group->sock_group);
@ -1203,7 +1203,7 @@ iscsi_poll_group_poll(void *ctx)
}
}
return rc;
return rc != 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}
static int
@ -1216,7 +1216,7 @@ iscsi_poll_group_handle_nop(void *ctx)
iscsi_conn_handle_nop(conn);
}
return -1;
return SPDK_POLLER_BUSY;
}
static int

View File

@ -656,14 +656,14 @@ iscsi_tgt_node_check_active_conns(void *arg)
struct spdk_iscsi_tgt_node *target = arg;
if (iscsi_get_active_conns(target) != 0) {
return 1;
return SPDK_POLLER_BUSY;
}
spdk_poller_unregister(&target->destruct_poller);
spdk_scsi_dev_destruct(target->dev, _iscsi_tgt_node_destruct, target);
return 1;
return SPDK_POLLER_BUSY;
}
static void

View File

@ -818,7 +818,7 @@ nbd_poll(void *arg)
spdk_nbd_stop(nbd);
}
return -1;
return SPDK_POLLER_BUSY;
}
static void *
@ -942,7 +942,7 @@ nbd_enable_kernel(void *arg)
NBD_BUSY_POLLING_INTERVAL_US);
}
/* If the kernel is busy, check back later */
return 0;
return SPDK_POLLER_BUSY;
}
SPDK_ERRLOG("ioctl(NBD_SET_SOCK) failed: %s\n", spdk_strerror(errno));
@ -957,7 +957,7 @@ nbd_enable_kernel(void *arg)
}
free(ctx);
return 1;
return SPDK_POLLER_BUSY;
}
if (ctx->poller) {
@ -966,7 +966,7 @@ nbd_enable_kernel(void *arg)
nbd_start_complete(ctx);
return 1;
return SPDK_POLLER_BUSY;
}
void

View File

@ -181,7 +181,7 @@ nvmf_ctrlr_keep_alive_poll(void *ctx)
}
}
return 1;
return SPDK_POLLER_BUSY;
}
static void

View File

@ -99,12 +99,12 @@ nvmf_poll_group_poll(void *ctx)
TAILQ_FOREACH(tgroup, &group->tgroups, link) {
rc = nvmf_transport_poll_group_poll(tgroup);
if (rc < 0) {
return -1;
return SPDK_POLLER_BUSY;
}
count += rc;
}
return count;
return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}
static int

View File

@ -3542,7 +3542,7 @@ nvmf_rdma_destroy_defunct_qpair(void *ctx)
nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
nvmf_rdma_qpair_destroy(rqpair);
return 0;
return SPDK_POLLER_BUSY;
}
static void

View File

@ -1056,7 +1056,7 @@ nvmf_tcp_qpair_handle_timeout(void *ctx)
SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT);
nvmf_tcp_qpair_disconnect(tqpair);
return 0;
return SPDK_POLLER_BUSY;
}
static void

View File

@ -95,12 +95,12 @@ scsi_lun_reset_check_outstanding_tasks(void *arg)
struct spdk_scsi_lun *lun = task->lun;
if (scsi_lun_has_outstanding_tasks(lun)) {
return 0;
return SPDK_POLLER_BUSY;
}
spdk_poller_unregister(&lun->reset_poller);
scsi_lun_complete_mgmt_task(lun, task);
return 1;
return SPDK_POLLER_BUSY;
}
void
@ -299,12 +299,12 @@ scsi_lun_check_io_channel(void *arg)
struct spdk_scsi_lun *lun = (struct spdk_scsi_lun *)arg;
if (lun->io_channel) {
return -1;
return SPDK_POLLER_BUSY;
}
spdk_poller_unregister(&lun->hotremove_poller);
scsi_lun_remove(lun);
return -1;
return SPDK_POLLER_BUSY;
}
static void
@ -339,12 +339,12 @@ scsi_lun_check_outstanding_tasks(void *arg)
if (scsi_lun_has_outstanding_tasks(lun) ||
scsi_lun_has_outstanding_mgmt_tasks(lun)) {
return -1;
return SPDK_POLLER_BUSY;
}
spdk_poller_unregister(&lun->hotremove_poller);
scsi_lun_notify_hot_remove(lun);
return -1;
return SPDK_POLLER_BUSY;
}
static void

View File

@ -688,7 +688,7 @@ vdev_worker(void *arg)
vhost_session_used_signal(vsession);
return -1;
return SPDK_POLLER_BUSY;
}
static void
@ -776,7 +776,7 @@ no_bdev_vdev_worker(void *arg)
bvsession->io_channel = NULL;
}
return -1;
return SPDK_POLLER_BUSY;
}
static struct spdk_vhost_blk_session *
@ -972,11 +972,11 @@ destroy_session_poller_cb(void *arg)
int i;
if (vsession->task_cnt > 0) {
return -1;
return SPDK_POLLER_BUSY;
}
if (spdk_vhost_trylock() != 0) {
return -1;
return SPDK_POLLER_BUSY;
}
for (i = 0; i < vsession->max_queues; i++) {
@ -997,7 +997,7 @@ destroy_session_poller_cb(void *arg)
vhost_session_stop_done(vsession, 0);
spdk_vhost_unlock();
return -1;
return SPDK_POLLER_BUSY;
}
static int

View File

@ -583,11 +583,11 @@ nvme_worker(void *arg)
int count = -1;
if (spdk_unlikely(!nvme->num_sqs)) {
return -1;
return SPDK_POLLER_IDLE;
}
if (spdk_unlikely(!nvme->dataplane_started && !nvme->bar)) {
return -1;
return SPDK_POLLER_IDLE;
}
for (qid = 1; qid <= MAX_IO_QUEUES; qid++) {
@ -598,7 +598,7 @@ nvme_worker(void *arg)
}
cq = vhost_nvme_get_cq_from_qid(nvme, sq->cqid);
if (spdk_unlikely(!cq)) {
return -1;
return SPDK_POLLER_BUSY;
}
cq->guest_signaled_cq_head = vhost_nvme_get_queue_head(nvme, cq_offset(sq->cqid, 1));
if (spdk_unlikely(!STAILQ_EMPTY(&cq->cq_full_waited_tasks) &&
@ -620,7 +620,7 @@ nvme_worker(void *arg)
task = STAILQ_FIRST(&nvme->free_tasks);
STAILQ_REMOVE_HEAD(&nvme->free_tasks, stailq);
} else {
return -1;
return SPDK_POLLER_BUSY;
}
task->cmd = sq->sq_cmd[sq->sq_head];
@ -1113,7 +1113,7 @@ destroy_device_poller_cb(void *arg)
/* FIXME wait for pending I/Os to complete */
if (spdk_vhost_trylock() != 0) {
return -1;
return SPDK_POLLER_BUSY;
}
for (i = 0; i < nvme->num_ns; i++) {
@ -1137,7 +1137,7 @@ destroy_device_poller_cb(void *arg)
vhost_session_stop_done(nvme->vsession, 0);
spdk_vhost_unlock();
return -1;
return SPDK_POLLER_BUSY;
}
static int

View File

@ -769,7 +769,7 @@ vdev_mgmt_worker(void *arg)
process_vq(svsession, &vsession->virtqueue[VIRTIO_SCSI_CONTROLQ]);
vhost_vq_used_signal(vsession, &vsession->virtqueue[VIRTIO_SCSI_CONTROLQ]);
return -1;
return SPDK_POLLER_BUSY;
}
static int
@ -785,7 +785,7 @@ vdev_worker(void *arg)
vhost_session_used_signal(vsession);
return -1;
return SPDK_POLLER_BUSY;
}
static struct spdk_vhost_scsi_dev *
@ -1364,11 +1364,11 @@ destroy_session_poller_cb(void *arg)
uint32_t i;
if (vsession->task_cnt > 0) {
return -1;
return SPDK_POLLER_BUSY;
}
if (spdk_vhost_trylock() != 0) {
return -1;
return SPDK_POLLER_BUSY;
}
for (i = 0; i < vsession->max_queues; i++) {
@ -1408,7 +1408,7 @@ destroy_session_poller_cb(void *arg)
vhost_session_stop_done(vsession, 0);
spdk_vhost_unlock();
return -1;
return SPDK_POLLER_BUSY;
}
static int

View File

@ -184,9 +184,8 @@ ioat_poll(void *arg)
{
struct spdk_ioat_chan *chan = arg;
spdk_ioat_process_events(chan);
return -1;
return spdk_ioat_process_events(chan) != 0 ? SPDK_POLLER_BUSY :
SPDK_POLLER_IDLE;
}
static struct spdk_io_channel *ioat_get_io_channel(void);

View File

@ -323,7 +323,7 @@ bdev_aio_group_poll(void *arg)
nr = bdev_user_io_getevents(group_ch->io_ctx, SPDK_AIO_QUEUE_DEPTH, events);
if (nr < 0) {
return -1;
return SPDK_POLLER_IDLE;
}
for (i = 0; i < nr; i++) {
@ -338,7 +338,7 @@ bdev_aio_group_poll(void *arg)
aio_task->ch->io_inflight--;
}
return nr;
return nr > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}
static void
@ -384,7 +384,7 @@ bdev_aio_reset_retry_timer(void *arg)
fdisk,
_bdev_aio_get_io_inflight_done);
return -1;
return SPDK_POLLER_BUSY;
}
static void

View File

@ -702,7 +702,7 @@ comp_dev_poller(void *args)
}
}
}
return 0;
return num_deq == 0 ? SPDK_POLLER_IDLE : SPDK_POLLER_BUSY;
}
/* Entry point for reduce lib to issue a compress operation. */

View File

@ -199,7 +199,7 @@ _delay_finish_io(void *arg)
_process_io_stailq(&delay_ch->p99_read_io, ticks);
_process_io_stailq(&delay_ch->p99_write_io, ticks);
return 0;
return SPDK_POLLER_BUSY;
}
/* Completion callback for IO that were issued from this bdev. The original bdev_io

View File

@ -378,27 +378,29 @@ bdev_iscsi_poll_lun(void *_lun)
if (poll(&pfd, 1, 0) < 0) {
SPDK_ERRLOG("poll failed\n");
return -1;
return SPDK_POLLER_IDLE;
}
if (pfd.revents != 0) {
if (iscsi_service(lun->context, pfd.revents) < 0) {
SPDK_ERRLOG("iscsi_service failed: %s\n", iscsi_get_error(lun->context));
}
return SPDK_POLLER_BUSY;
}
return -1;
return SPDK_POLLER_IDLE;
}
static int
bdev_iscsi_no_master_ch_poll(void *arg)
{
struct bdev_iscsi_lun *lun = arg;
int rc = 0;
enum spdk_thread_poller_rc rc = SPDK_POLLER_IDLE;
if (pthread_mutex_trylock(&lun->mutex)) {
/* Don't care about the error code here. */
return -1;
return SPDK_POLLER_IDLE;
}
if (lun->ch_count == 0) {
@ -754,6 +756,10 @@ iscsi_bdev_conn_poll(void *arg)
struct pollfd pfd;
struct iscsi_context *context;
if (TAILQ_EMPTY(&g_iscsi_conn_req)) {
return SPDK_POLLER_IDLE;
}
TAILQ_FOREACH_SAFE(req, &g_iscsi_conn_req, link, tmp) {
context = req->context;
pfd.fd = iscsi_get_fd(context);
@ -761,7 +767,7 @@ iscsi_bdev_conn_poll(void *arg)
pfd.revents = 0;
if (poll(&pfd, 1, 0) < 0) {
SPDK_ERRLOG("poll failed\n");
return -1;
return SPDK_POLLER_BUSY;
}
if (pfd.revents != 0) {
@ -784,7 +790,7 @@ iscsi_bdev_conn_poll(void *arg)
_bdev_iscsi_conn_req_free(req);
}
}
return -1;
return SPDK_POLLER_BUSY;
}
int

View File

@ -368,7 +368,7 @@ null_io_poll(void *arg)
TAILQ_SWAP(&ch->io, &io, spdk_bdev_io, module_link);
if (TAILQ_EMPTY(&io)) {
return 0;
return SPDK_POLLER_IDLE;
}
while (!TAILQ_EMPTY(&io)) {
@ -377,7 +377,7 @@ null_io_poll(void *arg)
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
}
return 1;
return SPDK_POLLER_BUSY;
}
static int

View File

@ -258,7 +258,7 @@ bdev_nvme_poll(void *arg)
}
}
return num_completions;
return num_completions > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}
static int
@ -276,7 +276,7 @@ bdev_nvme_poll_adminq(void *arg)
bdev_nvme_reset(nvme_bdev_ctrlr, NULL, true);
}
return rc;
return rc == 0 ? SPDK_POLLER_IDLE : SPDK_POLLER_BUSY;
}
static int
@ -1569,17 +1569,16 @@ bdev_nvme_hotplug(void *arg)
hotplug_probe_cb,
attach_cb, remove_cb);
if (!g_hotplug_probe_ctx) {
return -1;
return SPDK_POLLER_BUSY;
}
}
done = spdk_nvme_probe_poll_async(g_hotplug_probe_ctx);
if (done != -EAGAIN) {
g_hotplug_probe_ctx = NULL;
return 1;
}
return -1;
return SPDK_POLLER_BUSY;
}
void
@ -1740,7 +1739,7 @@ bdev_nvme_async_poll(void *arg)
free(ctx);
}
return 1;
return SPDK_POLLER_BUSY;
}
int

View File

@ -942,7 +942,7 @@ bdev_ocssd_poll_mm(void *ctx)
}
}
return 0;
return SPDK_POLLER_BUSY;
}
void

View File

@ -157,14 +157,14 @@ nvme_bdev_ctrlr_destruct(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
/* If we have already registered a poller, let that one take care of it. */
if (nvme_bdev_ctrlr->destruct_poller != NULL) {
pthread_mutex_unlock(&g_bdev_nvme_mutex);
return 1;
return SPDK_POLLER_IDLE;
}
if (nvme_bdev_ctrlr->resetting) {
nvme_bdev_ctrlr->destruct_poller =
SPDK_POLLER_REGISTER((spdk_poller_fn)nvme_bdev_ctrlr_destruct, nvme_bdev_ctrlr, 1000);
pthread_mutex_unlock(&g_bdev_nvme_mutex);
return 1;
return SPDK_POLLER_BUSY;
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);
@ -179,7 +179,7 @@ nvme_bdev_ctrlr_destruct(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
}
spdk_io_device_unregister(nvme_bdev_ctrlr, nvme_bdev_unregister_cb);
return 1;
return SPDK_POLLER_BUSY;
}
void

View File

@ -342,13 +342,13 @@ cleaner_poll(void *arg)
if (spdk_get_ticks() >= priv->next_run) {
ocf_cleaner_run(cleaner, priv->queue);
return 1;
return SPDK_POLLER_BUSY;
}
if (iono > 0) {
return 1;
return SPDK_POLLER_BUSY;
} else {
return 0;
return SPDK_POLLER_IDLE;
}
}

View File

@ -783,9 +783,9 @@ queue_poll(void *opaque)
}
if (iono > 0) {
return 1;
return SPDK_POLLER_BUSY;
} else {
return 0;
return SPDK_POLLER_IDLE;
}
}
@ -891,9 +891,9 @@ mngt_queue_poll(void *opaque)
}
if (iono > 0) {
return 1;
return SPDK_POLLER_BUSY;
} else {
return 0;
return SPDK_POLLER_IDLE;
}
}

View File

@ -347,7 +347,7 @@ bdev_rbd_reset_timer(void *arg)
spdk_poller_unregister(&disk->reset_timer);
disk->reset_bdev_io = NULL;
return -1;
return SPDK_POLLER_BUSY;
}
static int
@ -468,7 +468,7 @@ bdev_rbd_io_poll(void *arg)
/* check the return value of poll since we have only one fd for each channel */
if (rc != 1) {
return 0;
return SPDK_POLLER_BUSY;
}
rc = rbd_poll_io_events(ch->image, comps, SPDK_RBD_QUEUE_DEPTH);
@ -504,7 +504,7 @@ bdev_rbd_io_poll(void *arg)
}
}
return rc;
return rc > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}
static void

View File

@ -261,7 +261,7 @@ bdev_uring_group_poll(void *arg)
}
if (ret < 0) {
return 1;
return SPDK_POLLER_BUSY;
}
count = 0;
@ -269,7 +269,11 @@ bdev_uring_group_poll(void *arg)
count = bdev_uring_reap(&group_ch->uring, to_complete);
}
return (count + to_submit);
if (count + to_submit > 0) {
return SPDK_POLLER_BUSY;
} else {
return SPDK_POLLER_IDLE;
}
}
static void bdev_uring_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,

View File

@ -811,7 +811,7 @@ bdev_virtio_poll(void *arg)
if (spdk_unlikely(scan_ctx && io[i] == &scan_ctx->io_ctx)) {
if (svdev->removed) {
_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
return -1;
return SPDK_POLLER_BUSY;
}
if (scan_ctx->restart) {
@ -831,9 +831,9 @@ bdev_virtio_poll(void *arg)
if (spdk_unlikely(scan_ctx && scan_ctx->needs_resend)) {
if (svdev->removed) {
_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
return -1;
return SPDK_POLLER_BUSY;
} else if (cnt == 0) {
return 0;
return SPDK_POLLER_IDLE;
}
rc = send_scan_io(scan_ctx);

View File

@ -111,7 +111,11 @@ acceptor_poll(void *arg)
count = spdk_nvmf_tgt_accept(tgt);
return count;
if (count > 0) {
return SPDK_POLLER_BUSY;
} else {
return SPDK_POLLER_IDLE;
}
}
static void

View File

@ -517,7 +517,7 @@ vpp_queue_poller(void *ctx)
vl_msg_api_handler((void *)msg);
}
return 0;
return SPDK_POLLER_BUSY;
}
static int
@ -532,7 +532,7 @@ app_queue_poller(void *ctx)
handle_mq_event(e);
svm_msg_q_free_msg(g_svm.app_event_queue, &msg);
}
return 0;
return SPDK_POLLER_BUSY;
}
/* This is required until sock.c API changes to asynchronous */
@ -1449,7 +1449,7 @@ vpp_application_detached_timeout(void *arg)
/* We need to finish detach on initial thread */
spdk_thread_send_msg(g_svm.init_thread, vpp_application_detached, NULL);
}
return 0;
return SPDK_POLLER_BUSY;
}
static void

View File

@ -923,7 +923,7 @@ test_poller(void)
CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
rc = comp_dev_poller((void *)&g_comp_bdev);
CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
CU_ASSERT(rc == 0);
CU_ASSERT(rc == SPDK_POLLER_BUSY);
/* Success from dequeue, 2 ops. nothing needing to be resubmitted.
*/
@ -942,7 +942,7 @@ test_poller(void)
CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
rc = comp_dev_poller((void *)&g_comp_bdev);
CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
CU_ASSERT(rc == 0);
CU_ASSERT(rc == SPDK_POLLER_BUSY);
/* Success from dequeue, one op to be resubmitted.
*/
@ -970,7 +970,7 @@ test_poller(void)
CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
rc = comp_dev_poller((void *)&g_comp_bdev);
CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
CU_ASSERT(rc == 0);
CU_ASSERT(rc == SPDK_POLLER_BUSY);
/* op_to_queue is freed in code under test */
free(cb_args);