ticketlock: introduce fair ticket based locking

The spinlock implementation is unfair: some threads may take locks
aggressively while leaving other threads starving for a long time.

This patch introduces a ticketlock, which gives each waiting thread a
ticket so that threads take the lock one by one, first come, first served.
This prevents any thread from starving for a long time and makes lock
acquisition more predictable.
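
For illustration only (not part of the patch): a minimal sketch of how the
new API is used; the counter and worker names below are made up for the
example.

#include <stdint.h>
#include <rte_ticketlock.h>

/* Example shared state protected by the new lock type. */
static rte_ticketlock_t cnt_lock = RTE_TICKETLOCK_INITIALIZER;
static uint64_t cnt;

static void
worker(void)
{
	/* Waiters are granted the lock strictly in arrival order. */
	rte_ticketlock_lock(&cnt_lock);
	cnt++;
	rte_ticketlock_unlock(&cnt_lock);
}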

Suggested-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Authored by Joyce Kong, 2019-03-25 19:11:07 +08:00; committed by Thomas Monjalon
parent 6fef1ae4fc
commit 184104fc61
5 changed files with 223 additions and 1 deletion


@@ -216,6 +216,10 @@ M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
F: lib/librte_eal/common/include/rte_bitmap.h
F: app/test/test_bitmap.c
Ticketlock
M: Joyce Kong <joyce.kong@arm.com>
F: lib/librte_eal/common/include/generic/rte_ticketlock.h
ARM v7
M: Jan Viktorin <viktorin@rehivetech.com>
M: Gavin Hu <gavin.hu@arm.com>


@@ -65,6 +65,7 @@ The public API headers are grouped by topics:
[atomic] (@ref rte_atomic.h),
[rwlock] (@ref rte_rwlock.h),
[spinlock] (@ref rte_spinlock.h)
[ticketlock] (@ref rte_ticketlock.h)
- **CPU arch**:
[branch prediction] (@ref rte_branch_prediction.h),


@@ -20,7 +20,8 @@ INC += rte_bitmap.h rte_vfio.h rte_hypervisor.h rte_test.h
INC += rte_reciprocal.h rte_fbarray.h rte_uuid.h
GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h
GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h
GENERIC_INC += rte_memcpy.h rte_cpuflags.h
GENERIC_INC += rte_spinlock.h rte_rwlock.h rte_ticketlock.h
GENERIC_INC += rte_vect.h rte_pause.h rte_io.h
# defined in mk/arch/$(RTE_ARCH)/rte.vars.mk


@@ -0,0 +1,215 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019 Arm Limited
*/
#ifndef _RTE_TICKETLOCK_H_
#define _RTE_TICKETLOCK_H_
/**
* @file
*
* RTE ticket locks
*
* This file defines an API for ticket locks, which give each waiting
* thread a ticket so that threads take the lock one by one, in order:
* first come, first served.
*
* All locks must be initialised before use, and only initialised once.
*
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_pause.h>
/**
* The rte_ticketlock_t type.
*/
typedef union {
uint32_t tickets;
struct {
uint16_t current;
uint16_t next;
} s;
} rte_ticketlock_t;
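/* The 16-bit 'next' counter is the ticket handed to the next arriving
 * locker and 'current' is the ticket now being served; both overlay the
 * 32-bit 'tickets' word so trylock can update them with a single
 * compare-and-swap. The lock is free when current == next.
 */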
/**
* A static ticketlock initializer.
*/
#define RTE_TICKETLOCK_INITIALIZER { 0 }
/**
* Initialize the ticketlock to an unlocked state.
*
* @param tl
* A pointer to the ticketlock.
*/
static inline __rte_experimental void
rte_ticketlock_init(rte_ticketlock_t *tl)
{
__atomic_store_n(&tl->tickets, 0, __ATOMIC_RELAXED);
}
/**
* Take the ticketlock.
*
* @param tl
* A pointer to the ticketlock.
*/
static inline __rte_experimental void
rte_ticketlock_lock(rte_ticketlock_t *tl)
{
uint16_t me = __atomic_fetch_add(&tl->s.next, 1, __ATOMIC_RELAXED);
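/* The fetch-and-add above drew this thread's ticket; spin until the
 * 'now serving' counter reaches it. The acquire load orders the
 * critical section after lock acquisition.
 */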
while (__atomic_load_n(&tl->s.current, __ATOMIC_ACQUIRE) != me)
rte_pause();
}
/**
* Release the ticketlock.
*
* @param tl
* A pointer to the ticketlock.
*/
static inline __rte_experimental void
rte_ticketlock_unlock(rte_ticketlock_t *tl)
{
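/* Serve the next ticket; the release store makes the critical section's
 * writes visible before the lock is handed over.
 */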
uint16_t i = __atomic_load_n(&tl->s.current, __ATOMIC_RELAXED);
__atomic_store_n(&tl->s.current, i + 1, __ATOMIC_RELEASE);
}
/**
* Try to take the lock.
*
* @param tl
* A pointer to the ticketlock.
* @return
* 1 if the lock is successfully taken; 0 otherwise.
*/
static inline __rte_experimental int
rte_ticketlock_trylock(rte_ticketlock_t *tl)
{
rte_ticketlock_t old, new;
old.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED);
new.tickets = old.tickets;
new.s.next++;
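/* Only attempt to take the lock if it is currently free (no outstanding
 * tickets); the compare-and-swap fails if another thread drew a ticket
 * or took the lock in the meantime.
 */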
if (old.s.next == old.s.current) {
if (__atomic_compare_exchange_n(&tl->tickets, &old.tickets,
new.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
return 1;
}
return 0;
}
/**
* Test if the lock is taken.
*
* @param tl
* A pointer to the ticketlock.
* @return
* 1 if the lock is currently taken; 0 otherwise.
*/
static inline __rte_experimental int
rte_ticketlock_is_locked(rte_ticketlock_t *tl)
{
rte_ticketlock_t tic;
tic.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_ACQUIRE);
return (tic.s.current != tic.s.next);
}
/**
* The rte_ticketlock_recursive_t type.
*/
#define TICKET_LOCK_INVALID_ID -1
typedef struct {
rte_ticketlock_t tl; /**< the actual ticketlock */
int user; /**< thread id of the lock owner, TICKET_LOCK_INVALID_ID if unused */
unsigned int count; /**< number of nested acquisitions by the owning thread */
} rte_ticketlock_recursive_t;
/**
* A static recursive ticketlock initializer.
*/
#define RTE_TICKETLOCK_RECURSIVE_INITIALIZER {RTE_TICKETLOCK_INITIALIZER, \
TICKET_LOCK_INVALID_ID, 0}
/**
* Initialize the recursive ticketlock to an unlocked state.
*
* @param tlr
* A pointer to the recursive ticketlock.
*/
static inline __rte_experimental void
rte_ticketlock_recursive_init(rte_ticketlock_recursive_t *tlr)
{
rte_ticketlock_init(&tlr->tl);
__atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID, __ATOMIC_RELAXED);
tlr->count = 0;
}
/**
* Take the recursive ticketlock.
*
* @param tlr
* A pointer to the recursive ticketlock.
*/
static inline __rte_experimental void
rte_ticketlock_recursive_lock(rte_ticketlock_recursive_t *tlr)
{
int id = rte_gettid();
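/* If this thread does not already own the lock, take the underlying
 * ticketlock first; a relaxed load of 'user' is sufficient because only
 * the owner ever stores its own id there.
 */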
if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
rte_ticketlock_lock(&tlr->tl);
__atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);
}
tlr->count++;
}
/**
* Release the recursive ticketlock.
*
* @param tlr
* A pointer to the recursive ticketlock.
*/
static inline __rte_experimental void
rte_ticketlock_recursive_unlock(rte_ticketlock_recursive_t *tlr)
{
if (--(tlr->count) == 0) {
__atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID,
__ATOMIC_RELAXED);
rte_ticketlock_unlock(&tlr->tl);
}
}
/**
* Try to take the recursive lock.
*
* @param tlr
* A pointer to the recursive ticketlock.
* @return
* 1 if the lock is successfully taken; 0 otherwise.
*/
static inline __rte_experimental int
rte_ticketlock_recursive_trylock(rte_ticketlock_recursive_t *tlr)
{
int id = rte_gettid();
if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
if (rte_ticketlock_trylock(&tlr->tl) == 0)
return 0;
__atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);
}
tlr->count++;
return 1;
}
#ifdef __cplusplus
}
#endif
#endif /* _RTE_TICKETLOCK_H_ */
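
A similar illustration for the recursive variant (again not part of the
patch; the configuration lock and helper are made-up names). A function
like this can be entered both directly and from a caller that already
holds the lock:

#include <rte_ticketlock.h>

static rte_ticketlock_recursive_t cfg_lock =
	RTE_TICKETLOCK_RECURSIVE_INITIALIZER;

/* The recursive lock counts nested acquisitions by the same thread and
 * releases the underlying ticketlock only on the outermost unlock. */
static void
cfg_update(void)
{
	rte_ticketlock_recursive_lock(&cfg_lock);
	/* ... modify shared configuration ... */
	rte_ticketlock_recursive_unlock(&cfg_lock);
}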


@@ -99,6 +99,7 @@ generic_headers = files(
'include/generic/rte_prefetch.h',
'include/generic/rte_rwlock.h',
'include/generic/rte_spinlock.h',
'include/generic/rte_ticketlock.h',
'include/generic/rte_vect.h')
install_headers(generic_headers, subdir: 'generic')