Fix integer truncation bug in malloc(9)
A couple of internal functions used by malloc(9) and UMA truncated a
size_t down to an int. This could cause any number of issues (e.g.
indefinite sleeps or memory corruption) if any kernel subsystem tried
to allocate 2GB or more through malloc(9). ZFS would attempt such an
allocation when run on a system with 2TB or more of RAM.

Note to self: when this is MFCed, sparc64 needs the same fix.

Differential Revision:	https://reviews.freebsd.org/D2106
Reviewed by:	kib
Reported by:	Michael Fuckner <michael@fuckner.net>
Tested by:	Michael Fuckner <michael@fuckner.net>
MFC after:	2 weeks
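On LP64 platforms size_t is 64 bits wide while int is only 32, so a 2GB
request (0x80000000 bytes) no longer fits once it passes through an int
parameter. A minimal userland sketch of the truncation, illustrative only
and not part of this commit:

#include <stdio.h>
#include <stddef.h>

int
main(void)
{
	size_t request = 2UL * 1024 * 1024 * 1024;	/* a 2GB allocation */
	int seen = (int)request;	/* what the old int parameters did */

	/* On common LP64 ABIs the conversion wraps to a negative value. */
	printf("caller requested %zu bytes, allocator saw %d\n",
	    request, seen);
	return (0);
}

A wrapped or negative size then feeds page-allocation loops and kmem
requests, which is how the indefinite sleeps and corruption described
above arise.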
@@ -41,7 +41,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/vmparam.h>
 
 void *
-uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
 {
 	vm_page_t m;
 	vm_paddr_t pa;
@@ -70,7 +70,7 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 }
 
 void
-uma_small_free(void *mem, int size, u_int8_t flags)
+uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
 {
 	vm_page_t m;
 	vm_paddr_t pa;
@@ -340,7 +340,8 @@ static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
 static void pmap_pte_release(pt_entry_t *pte);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *);
 #ifdef PAE
-static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
+static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, uint8_t *flags,
+    int wait);
 #endif
 static void pmap_set_pg(void);
 
@@ -658,7 +659,7 @@ pmap_page_init(vm_page_t m)
 
 #ifdef PAE
 static void *
-pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
 {
 
 	/* Inform UMA that this allocator uses kernel_map/object. */
@@ -284,7 +284,7 @@ static int mb_zinit_pack(void *, int, int);
 static void mb_zfini_pack(void *, int);
 
 static void mb_reclaim(void *);
-static void *mbuf_jumbo_alloc(uma_zone_t, int, uint8_t *, int);
+static void *mbuf_jumbo_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
 
 /* Ensure that MSIZE is a power of 2. */
 CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
@@ -389,7 +389,7 @@ SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
  * pages.
  */
 static void *
-mbuf_jumbo_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
+mbuf_jumbo_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
 {
 
 	/* Inform UMA that this allocator uses kernel_map/object. */
@@ -147,8 +147,8 @@ busdma_bufalloc_findzone(busdma_bufalloc_t ba, bus_size_t size)
 }
 
 void *
-busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, int size, u_int8_t *pflag,
-    int wait)
+busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, vm_size_t size,
+    uint8_t *pflag, int wait)
 {
 #ifdef VM_MEMATTR_UNCACHEABLE
@@ -166,7 +166,7 @@ busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, int size, u_int8_t *pflag,
 }
 
 void
-busdma_bufalloc_free_uncacheable(void *item, int size, u_int8_t pflag)
+busdma_bufalloc_free_uncacheable(void *item, vm_size_t size, uint8_t pflag)
 {
 
 	kmem_free(kernel_arena, (vm_offset_t)item, size);
@@ -609,7 +609,7 @@ static struct mtx_padalign vmem_bt_lock;
  * we are really out of KVA.
  */
 static void *
-vmem_bt_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
+vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
 {
 	vmem_addr_t addr;
@@ -41,7 +41,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/vmparam.h>
 
 void *
-uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
 {
 	vm_paddr_t pa;
 	vm_page_t m;
@@ -70,7 +70,7 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 }
 
 void
-uma_small_free(void *mem, int size, u_int8_t flags)
+uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
 {
 	vm_page_t m;
 	vm_paddr_t pa;
@@ -1437,7 +1437,8 @@ retry:
 static mmu_t installed_mmu;
 
 static void *
-moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags,
+    int wait)
 {
 	struct pvo_entry *pvo;
 	vm_offset_t va;
@@ -473,7 +473,7 @@ slb_insert_user(pmap_t pm, struct slb *slb)
 }
 
 static void *
-slb_uma_real_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
 {
 	static vm_offset_t realmax = 0;
 	void *va;
@@ -50,7 +50,7 @@ SYSCTL_INT(_hw, OID_AUTO, uma_mdpages, CTLFLAG_RD, &hw_uma_mdpages, 0,
     "UMA MD pages in use");
 
 void *
-uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
 {
 	void *va;
 	vm_page_t m;
@@ -82,7 +82,7 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 }
 
 void
-uma_small_free(void *mem, int size, u_int8_t flags)
+uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
 {
 	vm_page_t m;
@@ -396,7 +396,7 @@ swi_vm(void *v)
 }
 
 void *
-uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
 {
 	vm_paddr_t pa;
 	vm_page_t m;
@@ -434,7 +434,7 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 }
 
 void
-uma_small_free(void *mem, int size, u_int8_t flags)
+uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
 {
 	vm_page_t m;
@@ -110,9 +110,10 @@ struct busdma_bufzone * busdma_bufalloc_findzone(busdma_bufalloc_t ba,
  * routines support pmap_page_set_memattr() and the VM_MEMATTR_UNCACHEABLE flag
  * you can probably use these when you need uncacheable buffers.
  */
-void * busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, int size,
-    u_int8_t *pflag, int wait);
-void  busdma_bufalloc_free_uncacheable(void *item, int size, u_int8_t pflag);
+void * busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, vm_size_t size,
+    uint8_t *pflag, int wait);
+void  busdma_bufalloc_free_uncacheable(void *item, vm_size_t size,
+    uint8_t pflag);
 
 #endif /* _MACHINE_BUSDMA_BUFALLOC_H_ */
@@ -382,7 +382,8 @@ uma_zfree(uma_zone_t zone, void *item)
  *	A pointer to the allocated memory or NULL on failure.
  */
 
-typedef void *(*uma_alloc)(uma_zone_t zone, int size, uint8_t *pflag, int wait);
+typedef void *(*uma_alloc)(uma_zone_t zone, vm_size_t size, uint8_t *pflag,
+    int wait);
 
 /*
  * Backend page free routines
@@ -395,7 +396,7 @@ typedef void *(*uma_alloc)(uma_zone_t zone, int size, uint8_t *pflag, int wait);
  * Returns:
  *	None
  */
-typedef void (*uma_free)(void *item, int size, uint8_t pflag);
+typedef void (*uma_free)(void *item, vm_size_t size, uint8_t pflag);
 
@@ -230,10 +230,10 @@ enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
 
 /* Prototypes.. */
 
-static void *noobj_alloc(uma_zone_t, int, uint8_t *, int);
-static void *page_alloc(uma_zone_t, int, uint8_t *, int);
-static void *startup_alloc(uma_zone_t, int, uint8_t *, int);
-static void page_free(void *, int, uint8_t);
+static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
+static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
+static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
+static void page_free(void *, vm_size_t, uint8_t);
 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
 static void cache_drain(uma_zone_t);
 static void bucket_drain(uma_zone_t, uma_bucket_t);
@@ -1038,7 +1038,7 @@ out:
  * the VM is ready.
  */
 static void *
-startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
+startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
 {
 	uma_keg_t keg;
 	uma_slab_t tmps;
@@ -1098,7 +1098,7 @@ startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
  * NULL if M_NOWAIT is set.
  */
 static void *
-page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
+page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
 {
 	void *p;	/* Returned page */
 
@@ -1120,7 +1120,7 @@ page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
  * NULL if M_NOWAIT is set.
  */
 static void *
-noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
+noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
 {
 	TAILQ_HEAD(, vm_page) alloctail;
 	u_long npages;
@@ -1183,7 +1183,7 @@ noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
  *	Nothing
  */
 static void
-page_free(void *mem, int size, uint8_t flags)
+page_free(void *mem, vm_size_t size, uint8_t flags)
 {
 	struct vmem *vmem;
 
@@ -3266,7 +3266,7 @@ uma_zone_exhausted_nolock(uma_zone_t zone)
 }
 
 void *
-uma_large_malloc(int size, int wait)
+uma_large_malloc(vm_size_t size, int wait)
 {
 	void *mem;
 	uma_slab_t slab;
@@ -341,7 +341,7 @@ zone_first_keg(uma_zone_t zone)
 #ifdef _KERNEL
 /* Internal prototypes */
 static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
-void *uma_large_malloc(int size, int wait);
+void *uma_large_malloc(vm_size_t size, int wait);
 void uma_large_free(uma_slab_t slab);
 
 /* Lock Macros */
@@ -424,8 +424,9 @@ vsetslab(vm_offset_t va, uma_slab_t slab)
  * if they can provide more effecient allocation functions. This is useful
 * for using direct mapped addresses.
 */
-void *uma_small_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait);
-void uma_small_free(void *mem, int size, uint8_t flags);
+void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,
+    int wait);
+void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
 #endif /* _KERNEL */
 
 #endif /* VM_UMA_INT_H */
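For reference, a backend allocator written against the new interface now
receives the full request width. The sketch below is hypothetical (struct
foo, foo_zone, and foo_alloc() are illustrative names, not part of this
commit); it mirrors page_alloc() in uma_core.c as changed above:

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

struct foo {
	int	f_dummy;
};

static uma_zone_t foo_zone;

/* Matches the updated uma_alloc typedef: the size arrives as vm_size_t. */
static void *
foo_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
{

	/* Inform UMA that this allocator draws from the kmem arena. */
	*pflag = UMA_SLAB_KMEM;
	return ((void *)kmem_malloc(kmem_arena, bytes, wait));
}

static void
foo_init(void)
{

	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(foo_zone, foo_alloc);
}

uma_zone_set_allocf() and uma_zone_set_freef() are the supported hooks for
installing such backends; any out-of-tree allocator must be updated to the
vm_size_t signatures when picking up this change.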