Don't import 0 into vmem quantum caches.

vmem uses UMA cache zones to implement the quantum cache.  Since
uma_zalloc() returns 0 (NULL) to signal an allocation failure, UMA
should not be used to cache resource 0.  Fix this by ensuring that 0 is
never cached in UMA in the first place, and by modifying vmem_alloc()
to fall back to a search of the free lists if the cache is depleted,
rather than blocking in qc_import().

Reported by and discussed with:	Brett Gutstein <bgutstein@rice.edu>
Reviewed by:	alc
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D17483
Author:	Mark Johnston
Date:	2018-10-22 16:16:42 +00:00
Commit:	21744c825f (parent d3a4b0dabc)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=339599
2 changed files with 23 additions and 16 deletions
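For context, a minimal sketch of the situation the log message describes. The arena name, base, and sizes below are hypothetical and only illustrate how a resource with value 0 can end up in a quantum cache; they are not taken from the commit.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/vmem.h>

/*
 * Hypothetical arena whose span legitimately starts at address 0, e.g. a
 * bus-address or I/O-port range.  With quantum = 1 and qcache_max = 16,
 * allocations of 16 units or less are served from the UMA-backed quantum
 * caches.
 */
static vmem_t *example_arena;

static void
example_init(void)
{

	example_arena = vmem_create("example", 0, 1024, 1, 16, M_WAITOK);
}

static int
example_alloc(vmem_addr_t *addrp)
{

	/*
	 * If resource 0 were sitting in the quantum cache, uma_zalloc()
	 * returning it would look exactly like an allocation failure, and
	 * the pre-fix vmem_alloc() would report a spurious ENOMEM.  With
	 * this change, 0 is never imported into UMA and can only be handed
	 * out by the vmem_xalloc() fallback, where a 0 result is unambiguous.
	 */
	return (vmem_alloc(example_arena, 1, M_BESTFIT | M_WAITOK, addrp));
}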

sys/kern/subr_vmem.c

@@ -504,6 +504,9 @@ bt_insfree(vmem_t *vm, bt_t *bt)
 
 /*
  * Import from the arena into the quantum cache in UMA.
+ *
+ * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate
+ * failure, so UMA can't be used to cache a resource with value 0.
  */
 static int
 qc_import(void *arg, void **store, int cnt, int domain, int flags)
@@ -512,19 +515,16 @@ qc_import(void *arg, void **store, int cnt, int domain, int flags)
 	vmem_addr_t addr;
 	int i;
 
+	KASSERT((flags & M_WAITOK) == 0, ("blocking allocation"));
+
 	qc = arg;
-	if ((flags & VMEM_FITMASK) == 0)
-		flags |= M_BESTFIT;
 	for (i = 0; i < cnt; i++) {
 		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
-		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
+		    VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
 			break;
 		store[i] = (void *)addr;
-		/* Only guarantee one allocation. */
-		flags &= ~M_WAITOK;
-		flags |= M_NOWAIT;
 	}
-	return i;
+	return (i);
 }
 
 /*
@@ -1123,15 +1123,20 @@ vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");
 
 	if (size <= vm->vm_qcache_max) {
+		/*
+		 * Resource 0 cannot be cached, so avoid a blocking allocation
+		 * in qc_import() and give the vmem_xalloc() call below a chance
+		 * to return 0.
+		 */
 		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
-		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
-		if (*addrp == 0)
-			return (ENOMEM);
-		return (0);
+		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache,
+		    (flags & ~M_WAITOK) | M_NOWAIT);
+		if (__predict_true(*addrp != 0))
+			return (0);
 	}
 
-	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
-	    flags, addrp);
+	return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
+	    flags, addrp));
 }
 
 int
@@ -1263,7 +1268,8 @@ vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
 	qcache_t *qc;
 	MPASS(size > 0);
 
-	if (size <= vm->vm_qcache_max) {
+	if (size <= vm->vm_qcache_max &&
+	    __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) {
 		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
 		uma_zfree(qc->qc_cache, (void *)addr);
 	} else

sys/sys/vmem.h

@@ -41,8 +41,9 @@ typedef struct vmem vmem_t;
 typedef uintptr_t vmem_addr_t;
 typedef size_t vmem_size_t;
 
-#define	VMEM_ADDR_MIN	0
-#define	VMEM_ADDR_MAX	(~(vmem_addr_t)0)
+#define	VMEM_ADDR_MIN		0
+#define	VMEM_ADDR_QCACHE_MIN	1
+#define	VMEM_ADDR_MAX		(~(vmem_addr_t)0)
 
 typedef int (vmem_import_t)(void *, vmem_size_t, int, vmem_addr_t *);
 typedef void (vmem_release_t)(void *, vmem_addr_t, vmem_size_t);
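VMEM_ADDR_QCACHE_MIN is just a minimum-address bound. A hypothetical caller that, like the patched qc_import(), must never receive address 0 (for instance because the result is cast to a pointer or cached in UMA) would pass it to vmem_xalloc() the same way; the helper name below is illustrative only.

/*
 * Hypothetical helper: allocate "size" units from "arena" while excluding
 * address 0, mirroring what the patched qc_import() does.
 */
static int
example_xalloc_nonzero(vmem_t *arena, vmem_size_t size, vmem_addr_t *addrp)
{

	return (vmem_xalloc(arena, size, 0, 0, 0, VMEM_ADDR_QCACHE_MIN,
	    VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, addrp));
}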