Zap #if 0'ed map init code that got moved to the MI area.

Convert the powerpc tree to use the common code.
Peter Wemm 2001-09-04 08:42:35 +00:00
parent b53f9c45f9
commit cfbf880deb
8 changed files with 4 additions and 992 deletions
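For orientation before the per-file hunks: the shape a converted MD cpu_startup() takes can be pieced together from the context lines below. This is a sketch only; placing struct kva_md_info and vm_ksubmap_init() in <vm/vm_kern.h> is an assumption, and only the kmi declaration and the calls themselves are visible in this diff.

/*
 * Sketch of a post-conversion MD cpu_startup(), reconstructed from the
 * hunks in this commit.  The <vm/vm_kern.h> include is assumed; other
 * MD includes are omitted.
 */
#include <vm/vm_kern.h>

struct kva_md_info kmi;

static void
cpu_startup(void *dummy)
{
	/* ... machine-dependent banner and memory setup, unchanged ... */

	/*
	 * One MI call now does what the deleted blocks used to do by hand:
	 * size nbuf/nswbuf and carve clean_map, buffer_map, pager_map and
	 * exec_map out of kernel_map.
	 */
	vm_ksubmap_init(&kmi);

	bufinit();
	vm_pager_bufferinit();
}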

View File

@@ -276,115 +276,6 @@ cpu_startup(dummy)
vm_ksubmap_init(&kmi);
#if 0
/*
* Calculate callout wheel size
*/
for (callwheelsize = 1, callwheelbits = 0;
callwheelsize < ncallout;
callwheelsize <<= 1, ++callwheelbits)
;
callwheelmask = callwheelsize - 1;
/*
* Allocate space for system data structures.
* The first available kernel virtual address is in "v".
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
* An index into the kernel page table corresponding to the
* virtual memory address maintained in "v" is kept in "mapaddr".
*/
/*
* Make two passes. The first pass calculates how much memory is
* needed and allocates it. The second pass assigns virtual
* addresses to the various data structures.
*/
firstaddr = 0;
again:
v = (caddr_t)firstaddr;
#define valloc(name, type, num) \
(name) = (type *)v; v = (caddr_t)((name)+(num))
#define valloclim(name, type, num, lim) \
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
valloc(callout, struct callout, ncallout);
valloc(callwheel, struct callout_tailq, callwheelsize);
/*
* The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
* For the first 64MB of ram nominally allocate sufficient buffers to
* cover 1/4 of our ram. Beyond the first 64MB allocate additional
* buffers to cover 1/20 of our ram over 64MB.
*/
if (nbuf == 0) {
int factor = 4 * BKVASIZE / PAGE_SIZE;
nbuf = 50;
if (physmem > 1024)
nbuf += min((physmem - 1024) / factor, 16384 / factor);
if (physmem > 16384)
nbuf += (physmem - 16384) * 2 / (factor * 5);
}
nswbuf = max(min(nbuf/4, 64), 16);
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
v = bufhashinit(v);
/*
* End of first pass, size has been calculated so allocate memory
*/
if (firstaddr == 0) {
size = (vm_size_t)(v - firstaddr);
firstaddr = (vm_offset_t)kmem_alloc(kernel_map, round_page(size));
if (firstaddr == 0)
panic("startup: no room for tables");
goto again;
}
/*
* End of second pass, addresses have been assigned
*/
if ((vm_size_t)(v - firstaddr) != size)
panic("startup: table size inconsistency");
clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
(nbuf*BKVASIZE));
buffer_map->system_map = 1;
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size);
pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
*/
/*
* Initialize callouts
*/
SLIST_INIT(&callfree);
for (i = 0; i < ncallout; i++) {
callout_init(&callout[i], 0);
callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
}
for (i = 0; i < callwheelsize; i++) {
TAILQ_INIT(&callwheel[i]);
}
mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
#endif
#if defined(USERCONFIG)
#if defined(USERCONFIG_BOOT)
if (1)
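An aside on the two-pass valloc sizing trick used by the block removed above, restated as a tiny standalone program. Every name here is illustrative rather than kernel API, and the pointer arithmetic from address 0 mirrors the original firstaddr = 0 pass.

#include <stdint.h>
#include <stdlib.h>

/* Carve `num` objects of `type` out of the running pointer v. */
#define VALLOC(name, type, num) \
	((name) = (type *)v, v = (char *)((name) + (num)))

struct callout { int c_dummy; };
struct buf { int b_dummy; };

static struct callout *callout;
static struct buf *buf;

/* Lay the tables out starting at v and return the first free byte. */
static char *
layout(char *v, int ncallout, int nbuf)
{
	VALLOC(callout, struct callout, ncallout);
	VALLOC(buf, struct buf, nbuf);
	return (v);
}

int
main(void)
{
	size_t size;
	char *base;

	/*
	 * Pass 1: start from address 0 (as the original firstaddr = 0 pass
	 * did), so the end pointer equals the total size needed.
	 */
	size = (size_t)(uintptr_t)layout(NULL, 64, 512);
	if ((base = malloc(size)) == NULL)
		return (1);		/* "startup: no room for tables" */
	/* Pass 2: real base; the check mirrors the original panic(). */
	if ((size_t)(layout(base, 64, 512) - base) != size)
		return (1);		/* "table size inconsistency" */
	free(base);
	return (0);
}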

View File

@@ -247,146 +247,6 @@ cpu_startup(dummy)
vm_ksubmap_init(&kmi);
#if 0
/*
* Calculate callout wheel size
*/
for (callwheelsize = 1, callwheelbits = 0;
callwheelsize < ncallout;
callwheelsize <<= 1, ++callwheelbits)
;
callwheelmask = callwheelsize - 1;
/*
* Allocate space for system data structures.
* The first available kernel virtual address is in "v".
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
* An index into the kernel page table corresponding to the
* virtual memory address maintained in "v" is kept in "mapaddr".
*/
/*
* Make two passes. The first pass calculates how much memory is
* needed and allocates it. The second pass assigns virtual
* addresses to the various data structures.
*/
firstaddr = 0;
again:
v = (caddr_t)firstaddr;
#define valloc(name, type, num) \
(name) = (type *)v; v = (caddr_t)((name)+(num))
#define valloclim(name, type, num, lim) \
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
valloc(callout, struct callout, ncallout);
valloc(callwheel, struct callout_tailq, callwheelsize);
/*
* Discount the physical memory larger than the size of kernel_map
* to avoid eating up all of KVA space.
*/
if (kernel_map->first_free == NULL) {
printf("Warning: no free entries in kernel_map.\n");
physmem_est = physmem;
} else {
physmem_est = min(physmem, btoc(kernel_map->max_offset -
kernel_map->min_offset));
}
/*
* The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
* For the first 64MB of ram nominally allocate sufficient buffers to
* cover 1/4 of our ram. Beyond the first 64MB allocate additional
* buffers to cover 1/20 of our ram over 64MB. When auto-sizing
* the buffer cache we limit the eventual kva reservation to
* maxbcache bytes.
*
* factor represents the 1/4 x ram conversion.
*/
if (nbuf == 0) {
int factor = 4 * BKVASIZE / PAGE_SIZE;
nbuf = 50;
if (physmem_est > 1024)
nbuf += min((physmem_est - 1024) / factor,
16384 / factor);
if (physmem_est > 16384)
nbuf += (physmem_est - 16384) * 2 / (factor * 5);
if (maxbcache && nbuf > maxbcache / BKVASIZE)
nbuf = maxbcache / BKVASIZE;
}
/*
* Do not allow the buffer_map to be more than 1/2 the size of the
* kernel_map.
*/
if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
(BKVASIZE * 2)) {
nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
(BKVASIZE * 2);
printf("Warning: nbufs capped at %d\n", nbuf);
}
nswbuf = max(min(nbuf/4, 256), 16);
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
v = bufhashinit(v);
/*
* End of first pass, size has been calculated so allocate memory
*/
if (firstaddr == 0) {
size = (vm_size_t)(v - firstaddr);
firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
if (firstaddr == 0)
panic("startup: no room for tables");
goto again;
}
/*
* End of second pass, addresses have been assigned
*/
if ((vm_size_t)(v - firstaddr) != size)
panic("startup: table size inconsistency");
clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
(nbuf*BKVASIZE));
buffer_map->system_map = 1;
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size);
pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
*/
/*
* Initialize callouts
*/
SLIST_INIT(&callfree);
for (i = 0; i < ncallout; i++) {
callout_init(&callout[i], 0);
callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
}
for (i = 0; i < callwheelsize; i++) {
TAILQ_INIT(&callwheel[i]);
}
mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
#endif
#if defined(USERCONFIG)
userconfig();
cninit(); /* the preferred console may have changed */

View File

@@ -247,146 +247,6 @@ cpu_startup(dummy)
vm_ksubmap_init(&kmi);
#if 0
/*
* Calculate callout wheel size
*/
for (callwheelsize = 1, callwheelbits = 0;
callwheelsize < ncallout;
callwheelsize <<= 1, ++callwheelbits)
;
callwheelmask = callwheelsize - 1;
/*
* Allocate space for system data structures.
* The first available kernel virtual address is in "v".
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
* An index into the kernel page table corresponding to the
* virtual memory address maintained in "v" is kept in "mapaddr".
*/
/*
* Make two passes. The first pass calculates how much memory is
* needed and allocates it. The second pass assigns virtual
* addresses to the various data structures.
*/
firstaddr = 0;
again:
v = (caddr_t)firstaddr;
#define valloc(name, type, num) \
(name) = (type *)v; v = (caddr_t)((name)+(num))
#define valloclim(name, type, num, lim) \
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
valloc(callout, struct callout, ncallout);
valloc(callwheel, struct callout_tailq, callwheelsize);
/*
* Discount the physical memory larger than the size of kernel_map
* to avoid eating up all of KVA space.
*/
if (kernel_map->first_free == NULL) {
printf("Warning: no free entries in kernel_map.\n");
physmem_est = physmem;
} else {
physmem_est = min(physmem, btoc(kernel_map->max_offset -
kernel_map->min_offset));
}
/*
* The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
* For the first 64MB of ram nominally allocate sufficient buffers to
* cover 1/4 of our ram. Beyond the first 64MB allocate additional
* buffers to cover 1/20 of our ram over 64MB. When auto-sizing
* the buffer cache we limit the eventual kva reservation to
* maxbcache bytes.
*
* factor represents the 1/4 x ram conversion.
*/
if (nbuf == 0) {
int factor = 4 * BKVASIZE / PAGE_SIZE;
nbuf = 50;
if (physmem_est > 1024)
nbuf += min((physmem_est - 1024) / factor,
16384 / factor);
if (physmem_est > 16384)
nbuf += (physmem_est - 16384) * 2 / (factor * 5);
if (maxbcache && nbuf > maxbcache / BKVASIZE)
nbuf = maxbcache / BKVASIZE;
}
/*
* Do not allow the buffer_map to be more than 1/2 the size of the
* kernel_map.
*/
if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
(BKVASIZE * 2)) {
nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
(BKVASIZE * 2);
printf("Warning: nbufs capped at %d\n", nbuf);
}
nswbuf = max(min(nbuf/4, 256), 16);
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
v = bufhashinit(v);
/*
* End of first pass, size has been calculated so allocate memory
*/
if (firstaddr == 0) {
size = (vm_size_t)(v - firstaddr);
firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
if (firstaddr == 0)
panic("startup: no room for tables");
goto again;
}
/*
* End of second pass, addresses have been assigned
*/
if ((vm_size_t)(v - firstaddr) != size)
panic("startup: table size inconsistency");
clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
(nbuf*BKVASIZE));
buffer_map->system_map = 1;
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size);
pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
*/
/*
* Initialize callouts
*/
SLIST_INIT(&callfree);
for (i = 0; i < ncallout; i++) {
callout_init(&callout[i], 0);
callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
}
for (i = 0; i < callwheelsize; i++) {
TAILQ_INIT(&callwheel[i]);
}
mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
#endif
#if defined(USERCONFIG)
userconfig();
cninit(); /* the preferred console may have changed */
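The buffer-cache sizing comments in the block removed above boil down to a short formula; here it is as a standalone function. The BKVASIZE/PAGE_SIZE constants and the numbers in main() are assumed example values for illustration, not taken from this commit.

#include <stdio.h>

#define PAGE_SIZE	4096		/* assumed example values */
#define BKVASIZE	16384		/* nominal buffer KVA allocation */
#define lmin(a, b)	((a) < (b) ? (a) : (b))

/* physmem_est is in pages; kva_bytes is the size of the kernel map. */
static int
size_nbuf(long physmem_est, long kva_bytes, long maxbcache)
{
	int factor = 4 * BKVASIZE / PAGE_SIZE;	/* the 1/4-of-RAM conversion */
	int nbuf = 50;

	/* Buffers to cover 1/4 of RAM up to 64MB (16384 pages)... */
	if (physmem_est > 1024)
		nbuf += lmin((physmem_est - 1024) / factor, 16384 / factor);
	/* ...then 1/20 of RAM beyond 64MB. */
	if (physmem_est > 16384)
		nbuf += (physmem_est - 16384) * 2 / (factor * 5);
	/* Clamp to maxbcache and to half of the kernel map. */
	if (maxbcache && nbuf > maxbcache / BKVASIZE)
		nbuf = maxbcache / BKVASIZE;
	if (nbuf > kva_bytes / (BKVASIZE * 2))
		nbuf = kva_bytes / (BKVASIZE * 2);
	return (nbuf);
}

int
main(void)
{
	/* e.g. 256MB of RAM, a 1GB kernel map, no maxbcache override */
	printf("nbuf = %d\n", size_nbuf(65536, 1L << 30, 0));
	return (0);
}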

View File

@@ -260,146 +260,6 @@ cpu_startup(dummy)
vm_ksubmap_init(&kmi);
#if 0
/*
* Calculate callout wheel size
*/
for (callwheelsize = 1, callwheelbits = 0;
callwheelsize < ncallout;
callwheelsize <<= 1, ++callwheelbits)
;
callwheelmask = callwheelsize - 1;
/*
* Allocate space for system data structures.
* The first available kernel virtual address is in "v".
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
* An index into the kernel page table corresponding to the
* virtual memory address maintained in "v" is kept in "mapaddr".
*/
/*
* Make two passes. The first pass calculates how much memory is
* needed and allocates it. The second pass assigns virtual
* addresses to the various data structures.
*/
firstaddr = 0;
again:
v = (caddr_t)firstaddr;
#define valloc(name, type, num) \
(name) = (type *)v; v = (caddr_t)((name)+(num))
#define valloclim(name, type, num, lim) \
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
valloc(callout, struct callout, ncallout);
valloc(callwheel, struct callout_tailq, callwheelsize);
/*
* Discount the physical memory larger than the size of kernel_map
* to avoid eating up all of KVA space.
*/
if (kernel_map->first_free == NULL) {
printf("Warning: no free entries in kernel_map.\n");
physmem_est = physmem;
} else {
physmem_est = min(physmem, btoc(kernel_map->max_offset -
kernel_map->min_offset));
}
/*
* The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
* For the first 64MB of ram nominally allocate sufficient buffers to
* cover 1/4 of our ram. Beyond the first 64MB allocate additional
* buffers to cover 1/20 of our ram over 64MB. When auto-sizing
* the buffer cache we limit the eventual kva reservation to
* maxbcache bytes.
*
* factor represents the 1/4 x ram conversion.
*/
if (nbuf == 0) {
int factor = 4 * BKVASIZE / PAGE_SIZE;
nbuf = 50;
if (physmem_est > 1024)
nbuf += min((physmem_est - 1024) / factor,
16384 / factor);
if (physmem_est > 16384)
nbuf += (physmem_est - 16384) * 2 / (factor * 5);
if (maxbcache && nbuf > maxbcache / BKVASIZE)
nbuf = maxbcache / BKVASIZE;
}
/*
* Do not allow the buffer_map to be more than 1/2 the size of the
* kernel_map.
*/
if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
(BKVASIZE * 2)) {
nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
(BKVASIZE * 2);
printf("Warning: nbufs capped at %d\n", nbuf);
}
nswbuf = max(min(nbuf/4, 256), 16);
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
v = bufhashinit(v);
/*
* End of first pass, size has been calculated so allocate memory
*/
if (firstaddr == 0) {
size = (vm_size_t)(v - firstaddr);
firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
if (firstaddr == 0)
panic("startup: no room for tables");
goto again;
}
/*
* End of second pass, addresses have been assigned
*/
if ((vm_size_t)(v - firstaddr) != size)
panic("startup: table size inconsistency");
clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
(nbuf*BKVASIZE));
buffer_map->system_map = 1;
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size);
pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
*/
/*
* Initialize callouts
*/
SLIST_INIT(&callfree);
for (i = 0; i < ncallout; i++) {
callout_init(&callout[i], 0);
callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
}
for (i = 0; i < callwheelsize; i++) {
TAILQ_INIT(&callwheel[i]);
}
mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
#endif
#if defined(USERCONFIG)
userconfig();
cninit(); /* the preferred console may have changed */

View File

@@ -260,146 +260,6 @@ cpu_startup(dummy)
vm_ksubmap_init(&kmi);
#if 0
/*
* Calculate callout wheel size
*/
for (callwheelsize = 1, callwheelbits = 0;
callwheelsize < ncallout;
callwheelsize <<= 1, ++callwheelbits)
;
callwheelmask = callwheelsize - 1;
/*
* Allocate space for system data structures.
* The first available kernel virtual address is in "v".
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
* An index into the kernel page table corresponding to the
* virtual memory address maintained in "v" is kept in "mapaddr".
*/
/*
* Make two passes. The first pass calculates how much memory is
* needed and allocates it. The second pass assigns virtual
* addresses to the various data structures.
*/
firstaddr = 0;
again:
v = (caddr_t)firstaddr;
#define valloc(name, type, num) \
(name) = (type *)v; v = (caddr_t)((name)+(num))
#define valloclim(name, type, num, lim) \
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
valloc(callout, struct callout, ncallout);
valloc(callwheel, struct callout_tailq, callwheelsize);
/*
* Discount the physical memory larger than the size of kernel_map
* to avoid eating up all of KVA space.
*/
if (kernel_map->first_free == NULL) {
printf("Warning: no free entries in kernel_map.\n");
physmem_est = physmem;
} else {
physmem_est = min(physmem, btoc(kernel_map->max_offset -
kernel_map->min_offset));
}
/*
* The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
* For the first 64MB of ram nominally allocate sufficient buffers to
* cover 1/4 of our ram. Beyond the first 64MB allocate additional
* buffers to cover 1/20 of our ram over 64MB. When auto-sizing
* the buffer cache we limit the eventual kva reservation to
* maxbcache bytes.
*
* factor represents the 1/4 x ram conversion.
*/
if (nbuf == 0) {
int factor = 4 * BKVASIZE / PAGE_SIZE;
nbuf = 50;
if (physmem_est > 1024)
nbuf += min((physmem_est - 1024) / factor,
16384 / factor);
if (physmem_est > 16384)
nbuf += (physmem_est - 16384) * 2 / (factor * 5);
if (maxbcache && nbuf > maxbcache / BKVASIZE)
nbuf = maxbcache / BKVASIZE;
}
/*
* Do not allow the buffer_map to be more than 1/2 the size of the
* kernel_map.
*/
if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
(BKVASIZE * 2)) {
nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
(BKVASIZE * 2);
printf("Warning: nbufs capped at %d\n", nbuf);
}
nswbuf = max(min(nbuf/4, 256), 16);
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
v = bufhashinit(v);
/*
* End of first pass, size has been calculated so allocate memory
*/
if (firstaddr == 0) {
size = (vm_size_t)(v - firstaddr);
firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
if (firstaddr == 0)
panic("startup: no room for tables");
goto again;
}
/*
* End of second pass, addresses have been assigned
*/
if ((vm_size_t)(v - firstaddr) != size)
panic("startup: table size inconsistency");
clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
(nbuf*BKVASIZE));
buffer_map->system_map = 1;
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size);
pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
*/
/*
* Initialize callouts
*/
SLIST_INIT(&callfree);
for (i = 0; i < ncallout; i++) {
callout_init(&callout[i], 0);
callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
}
for (i = 0; i < callwheelsize; i++) {
TAILQ_INIT(&callwheel[i]);
}
mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
#endif
#if defined(USERCONFIG)
userconfig();
cninit(); /* the preferred console may have changed */

View File

@@ -162,9 +162,7 @@ struct bat battable[16];
static void identifycpu(void);
static vm_offset_t buffer_sva, buffer_eva;
vm_offset_t clean_sva, clean_eva;
static vm_offset_t pager_sva, pager_eva;
struct kva_md_info kmi;
static void
powerpc_ofw_shutdown(void *junk, int howto)
@@ -177,14 +175,6 @@ powerpc_ofw_shutdown(void *junk, int howto)
static void
cpu_startup(void *dummy)
{
unsigned int i;
caddr_t v;
vm_offset_t maxaddr;
vm_size_t size;
vm_offset_t firstaddr;
vm_offset_t minaddr;
size = 0;
/*
* Good {morning,afternoon,evening,night}.
@@ -214,113 +204,7 @@ cpu_startup(void *dummy)
}
}
/*
* Calculate callout wheel size
*/
for (callwheelsize = 1, callwheelbits = 0;
callwheelsize < ncallout;
callwheelsize <<= 1, ++callwheelbits)
;
callwheelmask = callwheelsize - 1;
/*
* Allocate space for system data structures.
* The first available kernel virtual address is in "v".
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
* An index into the kernel page table corresponding to the
* virtual memory address maintained in "v" is kept in "mapaddr".
*/
/*
* Make two passes. The first pass calculates how much memory is
* needed and allocates it. The second pass assigns virtual
* addresses to the various data structures.
*/
firstaddr = 0;
again:
v = (caddr_t)firstaddr;
#define valloc(name, type, num) \
(name) = (type *)v; v = (caddr_t)((name)+(num))
#define valloclim(name, type, num, lim) \
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
valloc(callout, struct callout, ncallout);
valloc(callwheel, struct callout_tailq, callwheelsize);
/*
* The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
* For the first 64MB of ram nominally allocate sufficient buffers to
* cover 1/4 of our ram. Beyond the first 64MB allocate additional
* buffers to cover 1/20 of our ram over 64MB.
*/
if (nbuf == 0) {
int factor;
factor = 4 * BKVASIZE / PAGE_SIZE;
nbuf = 50;
if (Maxmem > 1024)
nbuf += min((Maxmem - 1024) / factor, 16384 / factor);
if (Maxmem > 16384)
nbuf += (Maxmem - 16384) * 2 / (factor * 5);
}
nswbuf = max(min(nbuf/4, 64), 16);
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
v = bufhashinit(v);
/*
* End of first pass, size has been calculated so allocate memory
*/
if (firstaddr == 0) {
size = (vm_size_t)(v - firstaddr);
firstaddr = (vm_offset_t)kmem_alloc(kernel_map,
round_page(size));
if (firstaddr == 0)
panic("startup: no room for tables");
goto again;
}
/*
* End of second pass, addresses have been assigned
*/
if ((vm_size_t)(v - firstaddr) != size)
panic("startup: table size inconsistency");
clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
(nbuf*BKVASIZE));
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size);
pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
*/
/*
* Initialize callouts
*/
SLIST_INIT(&callfree);
for (i = 0; i < ncallout; i++) {
callout_init(&callout[i], 0);
callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
}
for (i = 0; i < callwheelsize; i++) {
TAILQ_INIT(&callwheel[i]);
}
mtx_init(&callout_lock, "callout", MTX_SPIN);
vm_ksubmap_init(&kmi);
#if defined(USERCONFIG)
#if defined(USERCONFIG_BOOT)
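The callout wheel sizing removed from the powerpc cpu_startup() above is a round-up-to-power-of-two loop; a minimal standalone restatement follows (the ncallout value is an arbitrary example).

#include <stdio.h>

int
main(void)
{
	int ncallout = 4200;		/* arbitrary example value */
	int callwheelsize, callwheelbits, callwheelmask;

	/* Round up to the first power of two >= ncallout. */
	for (callwheelsize = 1, callwheelbits = 0;
	    callwheelsize < ncallout;
	    callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	/* A callout scheduled for tick t lands in wheel slot t & mask. */
	printf("size=%d bits=%d mask=%#x slot(1000123)=%d\n",
	    callwheelsize, callwheelbits, callwheelmask,
	    1000123 & callwheelmask);
	return (0);
}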

View File

@@ -162,9 +162,7 @@ struct bat battable[16];
static void identifycpu(void);
static vm_offset_t buffer_sva, buffer_eva;
vm_offset_t clean_sva, clean_eva;
static vm_offset_t pager_sva, pager_eva;
struct kva_md_info kmi;
static void
powerpc_ofw_shutdown(void *junk, int howto)
@@ -177,14 +175,6 @@ powerpc_ofw_shutdown(void *junk, int howto)
static void
cpu_startup(void *dummy)
{
unsigned int i;
caddr_t v;
vm_offset_t maxaddr;
vm_size_t size;
vm_offset_t firstaddr;
vm_offset_t minaddr;
size = 0;
/*
* Good {morning,afternoon,evening,night}.
@@ -214,113 +204,7 @@ cpu_startup(void *dummy)
}
}
/*
* Calculate callout wheel size
*/
for (callwheelsize = 1, callwheelbits = 0;
callwheelsize < ncallout;
callwheelsize <<= 1, ++callwheelbits)
;
callwheelmask = callwheelsize - 1;
/*
* Allocate space for system data structures.
* The first available kernel virtual address is in "v".
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
* An index into the kernel page table corresponding to the
* virtual memory address maintained in "v" is kept in "mapaddr".
*/
/*
* Make two passes. The first pass calculates how much memory is
* needed and allocates it. The second pass assigns virtual
* addresses to the various data structures.
*/
firstaddr = 0;
again:
v = (caddr_t)firstaddr;
#define valloc(name, type, num) \
(name) = (type *)v; v = (caddr_t)((name)+(num))
#define valloclim(name, type, num, lim) \
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
valloc(callout, struct callout, ncallout);
valloc(callwheel, struct callout_tailq, callwheelsize);
/*
* The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
* For the first 64MB of ram nominally allocate sufficient buffers to
* cover 1/4 of our ram. Beyond the first 64MB allocate additional
* buffers to cover 1/20 of our ram over 64MB.
*/
if (nbuf == 0) {
int factor;
factor = 4 * BKVASIZE / PAGE_SIZE;
nbuf = 50;
if (Maxmem > 1024)
nbuf += min((Maxmem - 1024) / factor, 16384 / factor);
if (Maxmem > 16384)
nbuf += (Maxmem - 16384) * 2 / (factor * 5);
}
nswbuf = max(min(nbuf/4, 64), 16);
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
v = bufhashinit(v);
/*
* End of first pass, size has been calculated so allocate memory
*/
if (firstaddr == 0) {
size = (vm_size_t)(v - firstaddr);
firstaddr = (vm_offset_t)kmem_alloc(kernel_map,
round_page(size));
if (firstaddr == 0)
panic("startup: no room for tables");
goto again;
}
/*
* End of second pass, addresses have been assigned
*/
if ((vm_size_t)(v - firstaddr) != size)
panic("startup: table size inconsistency");
clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
(nbuf*BKVASIZE));
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size);
pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
*/
/*
* Initialize callouts
*/
SLIST_INIT(&callfree);
for (i = 0; i < ncallout; i++) {
callout_init(&callout[i], 0);
callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
}
for (i = 0; i < callwheelsize; i++) {
TAILQ_INIT(&callwheel[i]);
}
mtx_init(&callout_lock, "callout", MTX_SPIN);
vm_ksubmap_init(&kmi);
#if defined(USERCONFIG)
#if defined(USERCONFIG_BOOT)

View File

@@ -166,93 +166,6 @@ cpu_startup(void *arg)
#endif
vm_ksubmap_init(&kmi);
#if 0
/*
* XXX make most of this MI and move to sys/kern.
*/
/*
* Calculate callout wheel size.
*/
for (callwheelsize = 1, callwheelbits = 0; callwheelsize < ncallout;
callwheelsize <<= 1, ++callwheelbits)
;
callwheelmask = callwheelsize - 1;
size = 0;
va = 0;
again:
p = (caddr_t)va;
#define valloc(name, type, num) \
(name) = (type *)p; p = (caddr_t)((name) + (num))
valloc(callout, struct callout, ncallout);
valloc(callwheel, struct callout_tailq, callwheelsize);
if (kernel_map->first_free == NULL) {
printf("Warning: no free entries in kernel_map.\n");
physmem_est = physmem;
} else
physmem_est = min(physmem,
kernel_map->max_offset - kernel_map->min_offset);
if (nbuf == 0) {
factor = 4 * BKVASIZE / PAGE_SIZE;
nbuf = 50;
if (physmem_est > 1024)
nbuf += min((physmem_est - 1024) / factor,
16384 / factor);
if (physmem_est > 16384)
nbuf += (physmem_est - 16384) * 2 / (factor * 5);
}
if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
(BKVASIZE * 2)) {
nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
(BKVASIZE * 2);
printf("Warning: nbufs capped at %d\n", nbuf);
}
nswbuf = max(min(nbuf/4, 256), 16);
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
p = bufhashinit(p);
if (va == 0) {
size = (vm_size_t)(p - va);
if ((va = kmem_alloc(kernel_map, round_page(size))) == 0)
panic("startup: no room for tables");
goto again;
}
if ((vm_size_t)(p - va) != size)
panic("startup: table size inconsistency");
clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
(nbuf*BKVASIZE));
buffer_map->system_map = 1;
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size);
pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
SLIST_INIT(&callfree);
for (i = 0; i < ncallout; i++) {
callout_init(&callout[i], 0);
callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
}
for (i = 0; i < callwheelsize; i++)
TAILQ_INIT(&callwheel[i]);
mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
#endif
bufinit();
vm_pager_bufferinit();