Rewrite ARM_USE_SMALL_ALLOC so that instead of the current behavior, it maps
the whole physical memory, cached, using 1MB section mappings. This reduces the
address space available for user processes a bit, but given the amount of
memory a typical ARM machine has, it is not (yet) a big issue. It then provides
a uma_small_alloc() that works as it does for architectures which have a direct
mapping.
parent d251852965
commit 49953e11d7
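For context, a 1MB "section" mapping is a single first-level descriptor that maps a whole megabyte of physical memory with no second-level page table. The sketch below is not part of the commit; it is a simplified userland illustration of the ARMv4/v5 descriptor layout (the AP and domain details are assumptions, and the real pmap_kenter_section() also handles the domain and pte_l1_s_cache_mode):

#include <stdint.h>
#include <stdio.h>

#define L1_S_SIZE	0x00100000UL	/* one section maps 1MB */
#define L1_S_FRAME	0xfff00000UL	/* section frame mask */
#define L1_TYPE_S	0x02		/* descriptor type: section */
#define L1_S_B		(1 << 2)	/* bufferable (write buffer) */
#define L1_S_C		(1 << 3)	/* cacheable */
#define L1_S_AP_KRW	(0x1 << 10)	/* AP=01: kernel r/w, no user access */

/* Build a cached, kernel-only L1 section entry for the 1MB frame at pa. */
static uint32_t
section_entry(uint32_t pa)
{
	return ((pa & L1_S_FRAME) | L1_TYPE_S | L1_S_C | L1_S_B | L1_S_AP_KRW);
}

int
main(void)
{
	/* e.g. the section covering physical address 0xa0000000 */
	printf("L1 entry: 0x%08x\n", section_entry(0xa0000000));
	return (0);
}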
@@ -66,11 +66,6 @@ __FBSDID("$FreeBSD$");
 #include "opt_global.h"
 
-#ifdef ARM_USE_SMALL_ALLOC
-extern vm_offset_t alloc_curaddr;
-extern vm_offset_t alloc_firstaddr;
-#endif
-
 /*
  * Used in /dev/mem drivers and elsewhere
  */
@@ -127,8 +122,8 @@ memrw(struct cdev *dev, struct uio *uio, int flags)
 				    uio->uio_rw == UIO_READ ?
 				    VM_PROT_READ : VM_PROT_WRITE))
 #ifdef ARM_USE_SMALL_ALLOC
-				if (addr < alloc_firstaddr || addr >
-				    alloc_curaddr)
+				if (addr <= VM_MAXUSER_ADDRESS ||
+				    addr >= KERNBASE)
 #endif
 					return (EFAULT);
 				error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
@@ -1140,7 +1140,6 @@ pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
 		cpu_tlb_flushD_SE(va);
 		cpu_cpwait();
 	}
-
 #ifdef ARM_USE_SMALL_ALLOC
 	}
 #endif
@@ -2384,8 +2383,6 @@ pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
 #define PMAP_STATIC_L2_SIZE 16
 #ifdef ARM_USE_SMALL_ALLOC
 extern struct mtx smallalloc_mtx;
-extern vm_offset_t alloc_curaddr;
-extern vm_offset_t alloc_firstaddr;
 #endif
 
 void
@@ -2544,9 +2541,9 @@ pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt
 
 #ifdef ARM_USE_SMALL_ALLOC
 	mtx_init(&smallalloc_mtx, "Small alloc page list", NULL, MTX_DEF);
-	alloc_firstaddr = alloc_curaddr = arm_nocache_startaddr +
-	    ARM_NOCACHE_KVA_SIZE;
+	arm_init_smallalloc();
 #endif
 	pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb);
 }
 
 /***************************************************
@@ -2933,6 +2930,9 @@ pmap_kremove(vm_offset_t va)
 vm_offset_t
 pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 {
+#ifdef ARM_USE_SMALL_ALLOC
+	return (arm_ptovirt(start));
+#else
 	vm_offset_t sva = *virt;
 	vm_offset_t va = sva;
@@ -2947,6 +2947,7 @@ pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 	}
 	*virt = va;
 	return (sva);
+#endif
 }
 
 static void
@@ -3999,6 +4000,10 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 void
 pmap_zero_page_generic(vm_paddr_t phys, int off, int size)
 {
+#ifdef ARM_USE_SMALL_ALLOC
+	char *dstpg;
+#endif
+
 #ifdef DEBUG
 	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
@@ -4010,6 +4015,16 @@ pmap_zero_page_generic(vm_paddr_t phys, int off, int size)
 	    _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0)
 		return;
 
+#ifdef ARM_USE_SMALL_ALLOC
+	dstpg = (char *)arm_ptovirt(phys);
+	if (off || size != PAGE_SIZE) {
+		bzero(dstpg + off, size);
+		cpu_dcache_wbinv_range((vm_offset_t)(dstpg + off), size);
+	} else {
+		bzero_page((vm_offset_t)dstpg);
+		cpu_dcache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE);
+	}
+#else
 	mtx_lock(&cmtx);
 	/*
@@ -4021,12 +4036,15 @@ pmap_zero_page_generic(vm_paddr_t phys, int off, int size)
 	PTE_SYNC(cdst_pte);
 	cpu_tlb_flushD_SE(cdstp);
 	cpu_cpwait();
-	if (off || size != PAGE_SIZE)
+	if (off || size != PAGE_SIZE) {
 		bzero((void *)(cdstp + off), size);
-	else
+		cpu_dcache_wbinv_range(cdstp + off, size);
+	} else {
 		bzero_page(cdstp);
-	mtx_unlock(&cmtx);
-	cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
+		cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
+	}
+	mtx_unlock(&cmtx);
+#endif
 }
 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
@@ -4034,7 +4052,6 @@ pmap_zero_page_generic(vm_paddr_t phys, int off, int size)
 void
 pmap_zero_page_xscale(vm_paddr_t phys, int off, int size)
 {
-
 	if (_arm_bzero &&
 	    _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0)
 		return;
@@ -4344,12 +4361,23 @@ pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
 void
 pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
+#ifdef ARM_USE_SMALL_ALLOC
+	vm_offset_t srcpg, dstpg;
+#endif
+
 	cpu_dcache_wbinv_all();
 	if (_arm_memcpy &&
 	    _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst),
 	    (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0)
 		return;
+#ifdef ARM_USE_SMALL_ALLOC
+	srcpg = arm_ptovirt(VM_PAGE_TO_PHYS(src));
+	dstpg = arm_ptovirt(VM_PAGE_TO_PHYS(dst));
+	bcopy_page(srcpg, dstpg);
+	cpu_dcache_wbinv_range(dstpg, PAGE_SIZE);
+#else
 	pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
+#endif
 }
@@ -66,6 +66,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_page.h>
 #include <vm/vm_map.h>
 #include <vm/vm_param.h>
+#include <vm/vm_pageout.h>
 #include <vm/uma.h>
 #include <vm/uma_int.h>
@@ -73,6 +74,7 @@ __FBSDID("$FreeBSD$");
 #define NSFBUFS		(512 + maxusers * 16)
 #endif
 
+#ifndef ARM_USE_SMALL_ALLOC
 static void sf_buf_init(void *arg);
 SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
@@ -94,6 +96,7 @@ static u_int sf_buf_alloc_want;
  * A lock used to synchronize access to the hash table and free list
  */
 static struct mtx sf_buf_lock;
+#endif
 
 /*
  * Finish a fork operation, with process p2 nearly set up.
@@ -161,6 +164,7 @@ cpu_thread_swapout(struct thread *td)
 void
 sf_buf_free(struct sf_buf *sf)
 {
+#ifndef ARM_USE_SMALL_ALLOC
 	mtx_lock(&sf_buf_lock);
 	sf->ref_count--;
 	if (sf->ref_count == 0) {
@@ -170,11 +174,13 @@ sf_buf_free(struct sf_buf *sf)
 		wakeup_one(&sf_buf_freelist);
 	}
 	mtx_unlock(&sf_buf_lock);
+#endif
 }
 
+#ifndef ARM_USE_SMALL_ALLOC
 /*
- * * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
- * */
+ * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
+ */
 static void
 sf_buf_init(void *arg)
 {
@@ -197,6 +203,7 @@ sf_buf_init(void *arg)
 	sf_buf_alloc_want = 0;
 	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
 }
+#endif
 
 /*
  * Get an sf_buf from the freelist. Will block if none are available.
@@ -204,6 +211,9 @@ sf_buf_init(void *arg)
 struct sf_buf *
 sf_buf_alloc(struct vm_page *m, int flags)
 {
+#ifdef ARM_USE_SMALL_ALLOC
+	return ((struct sf_buf *)m);
+#else
 	struct sf_head *hash_list;
 	struct sf_buf *sf;
 	int error;
@@ -249,7 +259,7 @@ sf_buf_alloc(struct vm_page *m, int flags)
 done:
 	mtx_unlock(&sf_buf_lock);
 	return (sf);
-
+#endif
 }
 
 /*
@@ -446,10 +456,55 @@ struct mtx smallalloc_mtx;
 
 MALLOC_DEFINE(M_VMSMALLALLOC, "vm_small_alloc", "VM Small alloc data");
 
-vm_offset_t alloc_curaddr;
-vm_offset_t alloc_firstaddr;
+static vm_offset_t alloc_firstaddr;
 
 extern int doverbose;
 
+vm_offset_t
+arm_ptovirt(vm_paddr_t pa)
+{
+	int i;
+	vm_offset_t addr = alloc_firstaddr;
+
+	KASSERT(alloc_firstaddr != 0, ("arm_ptovirt called too early?"));
+	for (i = 0; dump_avail[i]; i += 2) {
+		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
+			break;
+		addr += (dump_avail[i + 1] & L1_S_FRAME) + L1_S_SIZE -
+		    (dump_avail[i] & L1_S_FRAME);
+	}
+	KASSERT(dump_avail[i] != 0, ("Trying to access invalid physical address"));
+	return (addr + (pa - (dump_avail[i] & L1_S_FRAME)));
+}
+
+void
+arm_init_smallalloc(void)
+{
+	vm_offset_t to_map = 0, mapaddr;
+	int i;
+
+	/*
+	 * We need to use dump_avail and not phys_avail, since we want to
+	 * map the whole memory and not just the memory available to the VM
+	 * to be able to do a pa => va association for any address.
+	 */
+	for (i = 0; dump_avail[i]; i += 2) {
+		to_map += (dump_avail[i + 1] & L1_S_FRAME) + L1_S_SIZE -
+		    (dump_avail[i] & L1_S_FRAME);
+	}
+	alloc_firstaddr = mapaddr = KERNBASE - to_map;
+	for (i = 0; dump_avail[i]; i += 2) {
+		vm_offset_t size = (dump_avail[i + 1] & L1_S_FRAME) +
+		    L1_S_SIZE - (dump_avail[i] & L1_S_FRAME);
+		vm_offset_t did = 0;
+
+		while (size > 0) {
+			pmap_kenter_section(mapaddr,
+			    (dump_avail[i] & L1_S_FRAME) + did, SECTION_CACHE);
+			mapaddr += L1_S_SIZE;
+			did += L1_S_SIZE;
+			size -= L1_S_SIZE;
+		}
+	}
+}
+
 void
 arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
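To make the resulting layout concrete, here is a small standalone mirror of the arm_ptovirt()/arm_init_smallalloc() arithmetic above. The dump_avail ranges and KERNBASE value are hypothetical examples, not taken from the commit; the real functions operate on the kernel's own tables:

#include <stdio.h>
#include <stdint.h>

#define L1_S_SIZE	0x00100000UL
#define L1_S_FRAME	0xfff00000UL
#define KERNBASE	0xc0000000UL

/* Two made-up physical ranges, dump_avail[] style: {start, end} pairs,
 * zero-terminated. */
static uint32_t dump_avail[] =
    { 0xa0000000, 0xa2000000, 0xb0000000, 0xb0800000, 0 };

/* Walk the ranges, summing the section-rounded size of every range below
 * the one holding pa, exactly as arm_ptovirt() does. */
static uint32_t
ptovirt(uint32_t pa, uint32_t firstaddr)
{
	uint32_t addr = firstaddr;
	int i;

	for (i = 0; dump_avail[i]; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			break;
		addr += (dump_avail[i + 1] & L1_S_FRAME) + L1_S_SIZE -
		    (dump_avail[i] & L1_S_FRAME);
	}
	return (addr + (pa - (dump_avail[i] & L1_S_FRAME)));
}

int
main(void)
{
	uint32_t to_map = 0;
	int i;

	/* Same computation as arm_init_smallalloc(): total section-rounded
	 * size, packed immediately below KERNBASE. */
	for (i = 0; dump_avail[i]; i += 2)
		to_map += (dump_avail[i + 1] & L1_S_FRAME) + L1_S_SIZE -
		    (dump_avail[i] & L1_S_FRAME);
	printf("direct map: %u MB below KERNBASE, firstaddr 0x%x\n",
	    (unsigned)(to_map >> 20), (unsigned)(KERNBASE - to_map));
	printf("pa 0xb0123456 -> va 0x%x\n",
	    (unsigned)ptovirt(0xb0123456, KERNBASE - to_map));
	return (0);
}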
@@ -470,51 +525,14 @@ arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
 	}
 }
 
-static void *
-arm_uma_do_alloc(struct arm_small_page **pglist, int bytes, int pagetable)
-{
-	void *ret;
-	vm_page_t page_array = NULL;
-
-	*pglist = (void *)kmem_malloc(kmem_map, (0x100000 / PAGE_SIZE) *
-	    sizeof(struct arm_small_page), M_WAITOK);
-	if (*pglist && alloc_curaddr < 0xf0000000) {/* XXX */
-		mtx_lock(&Giant);
-		page_array = vm_page_alloc_contig(0x100000 / PAGE_SIZE,
-		    0, 0xffffffff, 0x100000, 0);
-		mtx_unlock(&Giant);
-	}
-	if (page_array) {
-		vm_paddr_t pa = VM_PAGE_TO_PHYS(page_array);
-
-		mtx_lock(&smallalloc_mtx);
-		ret = (void*)alloc_curaddr;
-		alloc_curaddr += 0x100000;
-		/* XXX: ARM_TP_ADDRESS should probably be move elsewhere. */
-		if (alloc_curaddr == ARM_TP_ADDRESS)
-			alloc_curaddr += 0x100000;
-		mtx_unlock(&smallalloc_mtx);
-		pmap_kenter_section((vm_offset_t)ret, pa,
-		    pagetable);
-	} else {
-		if (*pglist)
-			kmem_free(kmem_map, (vm_offset_t)*pglist,
-			    (0x100000 / PAGE_SIZE) *
-			    sizeof(struct arm_small_page));
-		*pglist = NULL;
-		ret = (void *)kmem_malloc(kmem_map, bytes, M_WAITOK);
-	}
-	return (ret);
-}
-
 void *
 uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 {
 	void *ret;
-	struct arm_small_page *sp, *tmp;
+	struct arm_small_page *sp;
 	TAILQ_HEAD(,arm_small_page) *head;
-	static struct thread *in_alloc;
-	static int in_sleep;
-	int should_wakeup = 0;
+	static vm_pindex_t color;
+	vm_page_t m;
 
 	*flags = UMA_SLAB_PRIV;
 	/*
@@ -527,55 +545,42 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 		head = (void *)&pages_normal;
 
 	mtx_lock(&smallalloc_mtx);
-retry:
 	sp = TAILQ_FIRST(head);
 
 	if (!sp) {
-		/* No more free pages, need to alloc more. */
-		if (!(wait & M_WAITOK) ||
-		    in_alloc == curthread) {
-			mtx_unlock(&smallalloc_mtx);
-			*flags = UMA_SLAB_KMEM;
-			return ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
-		}
-		if (in_alloc != NULL) {
-			/* Somebody else is already doing the allocation. */
-			in_sleep++;
-			msleep(&in_alloc, &smallalloc_mtx, PWAIT,
-			    "smallalloc", 0);
-			in_sleep--;
-			goto retry;
-		}
-		in_alloc = curthread;
+		int pflags;
+
 		mtx_unlock(&smallalloc_mtx);
-		/* Try to alloc 1MB of contiguous memory. */
-		ret = arm_uma_do_alloc(&sp, bytes, zone == l2zone ?
-		    SECTION_PT : SECTION_CACHE);
-		mtx_lock(&smallalloc_mtx);
-		in_alloc = NULL;
-		if (in_sleep > 0)
-			should_wakeup = 1;
-		if (sp) {
-			for (int i = 0; i < (0x100000 / PAGE_SIZE) - 1;
-			    i++) {
-				tmp = &sp[i];
-				tmp->addr = (char *)ret + i * PAGE_SIZE;
-				TAILQ_INSERT_HEAD(head, tmp, pg_list);
-			}
-			ret = (char *)ret + 0x100000 - PAGE_SIZE;
-			TAILQ_INSERT_HEAD(&free_pgdesc, &sp[(0x100000 / (
-			    PAGE_SIZE)) - 1], pg_list);
-		} else
-			*flags = UMA_SLAB_KMEM;
-	} else {
-		sp = TAILQ_FIRST(head);
+		if (zone == l2zone &&
+		    pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
+			*flags = UMA_SLAB_KMEM;
+			ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
+			return (ret);
+		}
+		if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
+			pflags = VM_ALLOC_INTERRUPT;
+		else
+			pflags = VM_ALLOC_SYSTEM;
+		if (wait & M_ZERO)
+			pflags |= VM_ALLOC_ZERO;
+		for (;;) {
+			m = vm_page_alloc(NULL, color++,
+			    pflags | VM_ALLOC_NOOBJ);
+			if (m == NULL) {
+				if (wait & M_NOWAIT)
+					return (NULL);
+				VM_WAIT;
+			} else
+				break;
+		}
+		ret = (void *)arm_ptovirt(VM_PAGE_TO_PHYS(m));
+		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
+			bzero(ret, PAGE_SIZE);
+		return (ret);
+	}
 	TAILQ_REMOVE(head, sp, pg_list);
 	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
 	ret = sp->addr;
-	}
-	if (should_wakeup)
-		wakeup(&in_alloc);
 	mtx_unlock(&smallalloc_mtx);
 	if ((wait & M_ZERO))
 		bzero(ret, bytes);
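The rewritten uma_small_alloc() above boils down to: take a preloaded descriptor off a freelist if one is there, otherwise grab any free physical page and return its direct-map address, so no KVA ever has to be reserved. A toy userland model of that policy (not the kernel code; locking, UMA, and the page-table-zone special case are elided):

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct arm_small_page. */
struct small_page {
	void *addr;				/* direct-map address */
	TAILQ_ENTRY(small_page) pg_list;
};
static TAILQ_HEAD(, small_page) free_pages =
    TAILQ_HEAD_INITIALIZER(free_pages);

/* Models vm_page_alloc() + arm_ptovirt(): any page has a usable address. */
static void *
any_free_page(void)
{
	return (malloc(4096));
}

static void *
small_alloc(void)
{
	struct small_page *sp = TAILQ_FIRST(&free_pages);

	if (sp == NULL)			/* freelist empty: fall back */
		return (any_free_page());
	TAILQ_REMOVE(&free_pages, sp, pg_list);
	return (sp->addr);
}

int
main(void)
{
	printf("%p\n", small_alloc());
	return (0);
}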
@@ -593,18 +598,30 @@ uma_small_free(void *mem, int size, u_int8_t flags)
 	else {
 		struct arm_small_page *sp;
 
-		mtx_lock(&smallalloc_mtx);
-		sp = TAILQ_FIRST(&free_pgdesc);
-		KASSERT(sp != NULL, ("No more free page descriptor ?"));
-		TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
-		sp->addr = mem;
-		pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd, &pt);
-		if ((*pd & pte_l1_s_cache_mask) == pte_l1_s_cache_mode_pt &&
-		    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
-			TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
-		else
-			TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
-		mtx_unlock(&smallalloc_mtx);
+		if ((vm_offset_t)mem >= KERNBASE) {
+			mtx_lock(&smallalloc_mtx);
+			sp = TAILQ_FIRST(&free_pgdesc);
+			KASSERT(sp != NULL, ("No more free page descriptor ?"));
+			TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
+			sp->addr = mem;
+			pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
+			    &pt);
+			if ((*pd & pte_l1_s_cache_mask) ==
+			    pte_l1_s_cache_mode_pt &&
+			    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
+				TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
+			else
+				TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
+			mtx_unlock(&smallalloc_mtx);
+		} else {
+			vm_page_t m;
+			vm_paddr_t pa = vtophys((vm_offset_t)mem);
+
+			m = PHYS_TO_VM_PAGE(pa);
+			vm_page_lock_queues();
+			vm_page_free(m);
+			vm_page_unlock_queues();
+		}
 	}
 }
@@ -465,6 +465,15 @@ initarm(void *arg, void *arg2)
 	arm_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);
 
 	pmap_curmaxkvaddr = afterkern + 0x100000 * (KERNEL_PT_KERN_NUM - 1);
+	/*
+	 * ARM_USE_SMALL_ALLOC uses dump_avail, so it must be filled before
+	 * calling pmap_bootstrap.
+	 */
+	dump_avail[0] = KERNPHYSADDR;
+	dump_avail[1] = KERNPHYSADDR + memsize;
+	dump_avail[2] = 0;
+	dump_avail[3] = 0;
+
 	pmap_bootstrap(freemempos,
 	    KERNVIRTADDR + 3 * memsize,
 	    &kernel_l1pt);
@@ -473,10 +482,6 @@ initarm(void *arg, void *arg2)
 	mutex_init();
 
 	i = 0;
-	dump_avail[0] = KERNPHYSADDR;
-	dump_avail[1] = KERNPHYSADDR + memsize;
-	dump_avail[2] = 0;
-	dump_avail[3] = 0;
-
 	phys_avail[0] = virtual_avail - KERNVIRTADDR + KERNPHYSADDR;
 	phys_avail[1] = KERNPHYSADDR + memsize;
@@ -531,7 +531,8 @@ void pmap_postinit(void);
 
 #ifdef ARM_USE_SMALL_ALLOC
 void arm_add_smallalloc_pages(void *, void *, int, int);
-void arm_busy_pages(void);
+vm_offset_t arm_ptovirt(vm_paddr_t);
+void arm_init_smallalloc(void);
 struct arm_small_page {
 	void *addr;
 	TAILQ_ENTRY(arm_small_page) pg_list;
@@ -29,10 +29,34 @@
 #ifndef _MACHINE_SF_BUF_H_
 #define _MACHINE_SF_BUF_H_
 
-#include <sys/queue.h>
-
 struct vm_page;
 
+#ifdef ARM_USE_SMALL_ALLOC
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_page.h>
+
+struct sf_buf;
+
+static __inline vm_offset_t
+sf_buf_kva(struct sf_buf *sf)
+{
+	return arm_ptovirt(VM_PAGE_TO_PHYS((vm_page_t)sf));
+}
+
+static __inline vm_page_t
+sf_buf_page(struct sf_buf *sf)
+{
+	return ((vm_page_t)sf);
+}
+
+#else
+
+#include <sys/queue.h>
+
 struct sf_buf {
 	LIST_ENTRY(sf_buf) list_entry;	/* list of buffers */
 	TAILQ_ENTRY(sf_buf) free_entry;	/* list of buffers */
@@ -55,4 +79,5 @@ sf_buf_page(struct sf_buf *sf)
 	return (sf->m);
 }
 
+#endif
 #endif /* !_MACHINE_SF_BUF_H_ */
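With the direct map, the header change above makes sf_buf_alloc() and sf_buf_kva() constant-time: the sf_buf handle is simply the vm_page pointer in disguise, and the KVA is computed rather than allocated. A toy userland model of that trick (hypothetical addresses; the real code uses arm_ptovirt() and the kernel's vm_page):

#include <stdio.h>
#include <stdint.h>

struct vm_page { uint32_t phys_addr; };	/* stand-in for the real vm_page */

/* Assume a single direct-mapped range: physical 0xa0000000 -> va 0xbd600000 */
static uint32_t
ptovirt(uint32_t pa)
{
	return (0xbd600000 + (pa - 0xa0000000));
}

struct sf_buf;		/* never defined: the pointer is a disguised vm_page */

static struct sf_buf *
sf_buf_alloc(struct vm_page *m)
{
	return ((struct sf_buf *)m);	/* no freelist, no lock, no KVA */
}

static uint32_t
sf_buf_kva(struct sf_buf *sf)
{
	return (ptovirt(((struct vm_page *)sf)->phys_addr));
}

int
main(void)
{
	struct vm_page pg = { 0xa0042000 };

	printf("kva = 0x%x\n", sf_buf_kva(sf_buf_alloc(&pg)));
	return (0);
}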
@@ -96,7 +96,14 @@
 #define UPT_MIN_ADDRESS		VADDR(UPTPTDI, 0)
 
 #define VM_MIN_ADDRESS		(0x00001000)
+#ifdef ARM_USE_SMALL_ALLOC
+#ifndef ARM_KERN_DIRECTMAP
+#define ARM_KERN_DIRECTMAP 512 * 1024 * 1024 /* 512 MB */
+#endif
+#define VM_MAXUSER_ADDRESS	KERNBASE - ARM_KERN_DIRECTMAP
+#else /* ARM_USE_SMALL_ALLOC */
 #define VM_MAXUSER_ADDRESS	KERNBASE
+#endif /* ARM_USE_SMALL_ALLOC */
 #define VM_MAX_ADDRESS		VM_MAXUSER_ADDRESS
 
 #define USRSTACK		VM_MAXUSER_ADDRESS
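The address-space cost mentioned in the commit message is visible right here: the user VA ceiling drops by ARM_KERN_DIRECTMAP. A quick check, assuming the common KERNBASE of 0xC0000000 (it is platform-configurable, so this value is an assumption):

#include <stdio.h>

#define KERNBASE		0xc0000000UL	/* assumed; platform-dependent */
#define ARM_KERN_DIRECTMAP	(512UL * 1024 * 1024)

int
main(void)
{
	/* user VA shrinks from [0x1000, KERNBASE) to [0x1000, KERNBASE - 512MB) */
	printf("VM_MAXUSER_ADDRESS = 0x%lx\n",
	    (unsigned long)(KERNBASE - ARM_KERN_DIRECTMAP));
	return (0);
}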
@@ -438,18 +438,15 @@ initarm(void *arg, void *arg2)
 
 	pmap_curmaxkvaddr = freemempos + KERNEL_PT_VMDATA_NUM * 0x400000;
 
-	pmap_bootstrap(freemempos,
-	    0xd0000000, &kernel_l1pt);
-
-	mutex_init();
-
 	dump_avail[0] = phys_avail[0] = round_page(virtual_avail);
 	dump_avail[1] = phys_avail[1] = 0xc0000000 + 0x02000000 - 1;
 	dump_avail[2] = phys_avail[2] = 0;
 	dump_avail[3] = phys_avail[3] = 0;
 
+	mutex_init();
+	pmap_bootstrap(freemempos,
+	    0xd0000000, &kernel_l1pt);
+
 	/* Do basic tuning, hz etc */
 	init_param1();
 	init_param2(physmem);
|
@ -442,6 +442,15 @@ initarm(void *arg, void *arg2)
|
||||
|
||||
|
||||
pmap_curmaxkvaddr = afterkern + PAGE_SIZE;
|
||||
/*
|
||||
* ARM_USE_SMALL_ALLOC uses dump_avail, so it must be filled before
|
||||
* calling pmap_bootstrap.
|
||||
*/
|
||||
dump_avail[0] = 0xa0000000;
|
||||
dump_avail[1] = 0xa0000000 + memsize;
|
||||
dump_avail[2] = 0;
|
||||
dump_avail[3] = 0;
|
||||
|
||||
pmap_bootstrap(pmap_curmaxkvaddr,
|
||||
0xd0000000, &kernel_l1pt);
|
||||
msgbufp = (void*)msgbufpv.pv_va;
|
||||
@@ -460,10 +469,6 @@ initarm(void *arg, void *arg2)
 	phys_avail[i++] = trunc_page(0xa0000000 + memsize - 1);
 	phys_avail[i++] = 0;
 	phys_avail[i] = 0;
-	dump_avail[0] = 0xa0000000;
-	dump_avail[1] = 0xa0000000 + memsize;
-	dump_avail[2] = 0;
-	dump_avail[3] = 0;
 
 	/* Do basic tuning, hz etc */
 	init_param1();
@@ -2,6 +2,7 @@
 ARM9_CACHE_WRITE_THROUGH	opt_global.h
 ARM_CACHE_LOCK_ENABLE		opt_global.h
 ARMFPE				opt_global.h
+ARM_KERN_DIRECTMAP		opt_vm.h
 ARM_USE_SMALL_ALLOC		opt_global.h
 COUNTS_PER_SEC			opt_timer.h
 CPU_SA1100			opt_global.h