f8f6b1c174
A mempool consumes 3 memzones (with the default ring mempool driver). The default DPDK configuration allows RTE_MAX_MEMZONE (2560) memzones. Assuming there are no other memzones, that means we can have a maximum of 853 mempools. In the vhost library, the IOTLB cache code so far was requesting a mempool per vq, which means that, at the maximum, the vhost library could request mempools for 426 qps. This limit was recently reached on big systems with a lot of virtio ports (and multiqueue in use). While the limit on mempool count could be something we fix at the DPDK project level, there is no reason to use mempools for the IOTLB cache: - the IOTLB cache entries do not need to be DMA-able and are only used by the current process (in multiprocess context), - getting/putting objects from/in the mempool is always associated with some other locks, so some level of lock contention is already present. We can convert to a malloc'd pool with objects put in a free list protected by a spinlock. Signed-off-by: David Marchand <david.marchand@redhat.com> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
54 lines
1.6 KiB
C
54 lines
1.6 KiB
C
/* SPDX-License-Identifier: BSD-3-Clause
|
|
* Copyright (c) 2017 Red Hat, Inc.
|
|
*/
|
|
|
|
#ifndef _VHOST_IOTLB_H_
|
|
#define _VHOST_IOTLB_H_
|
|
|
|
#include <stdbool.h>
|
|
|
|
#include "vhost.h"
|
|
|
|
static __rte_always_inline void
|
|
vhost_user_iotlb_rd_lock(struct vhost_virtqueue *vq)
|
|
{
|
|
rte_rwlock_read_lock(&vq->iotlb_lock);
|
|
}
|
|
|
|
static __rte_always_inline void
|
|
vhost_user_iotlb_rd_unlock(struct vhost_virtqueue *vq)
|
|
{
|
|
rte_rwlock_read_unlock(&vq->iotlb_lock);
|
|
}
|
|
|
|
static __rte_always_inline void
|
|
vhost_user_iotlb_wr_lock(struct vhost_virtqueue *vq)
|
|
{
|
|
rte_rwlock_write_lock(&vq->iotlb_lock);
|
|
}
|
|
|
|
static __rte_always_inline void
|
|
vhost_user_iotlb_wr_unlock(struct vhost_virtqueue *vq)
|
|
{
|
|
rte_rwlock_write_unlock(&vq->iotlb_lock);
|
|
}
|
|
|
|
/*
 * IOTLB cache API — implemented in vhost_iotlb.c.
 * Callers are expected to hold the appropriate vq->iotlb_lock
 * (see the rd/wr lock helpers above) where concurrent access is possible.
 */

/* Insert an iova -> uaddr translation of the given size/permissions
 * into the vq's IOTLB cache. */
void vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
					uint64_t iova, uint64_t uaddr,
					uint64_t size, uint8_t perm);

/* Remove cached translations overlapping [iova, iova + size). */
void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
					uint64_t iova, uint64_t size);

/* Look up iova in the cache with the requested permissions.
 * NOTE(review): presumably returns the translated address (0 on miss) and
 * updates *size with the contiguously mapped length — confirm in vhost_iotlb.c. */
uint64_t vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
					uint64_t *size, uint8_t perm);

/* Check whether a miss for iova/perm is already pending (i.e. an IOTLB
 * miss request was sent and the reply has not yet arrived). */
bool vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
						uint8_t perm);

/* Record iova/perm in the pending-miss list. */
void vhost_user_iotlb_pending_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
						uint64_t iova, uint8_t perm);

/* Drop pending-miss entries matching [iova, iova + size) with perm. */
void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq, uint64_t iova,
						uint64_t size, uint8_t perm);

/* Invalidate every cached translation and pending entry for this vq. */
void vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq);

/* Allocate per-vq IOTLB resources; returns 0 on success, negative on error
 * (sign convention assumed from DPDK style — TODO confirm in vhost_iotlb.c). */
int vhost_user_iotlb_init(struct virtio_net *dev, struct vhost_virtqueue *vq);

/* Free all per-vq IOTLB resources allocated by vhost_user_iotlb_init(). */
void vhost_user_iotlb_destroy(struct vhost_virtqueue *vq);
|
|
|
|
#endif /* _VHOST_IOTLB_H_ */
|