/*
 * pdcache -- a cache of page-directory (struct vmpd) nodes, carved out of
 * whole pages and kept on a spinlock-protected free list.
 */
#include <errno.h>
#include <stdbool.h>
#include <string.h>

#include <sys/kassert.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/spinlock.h>
#include <sys/thread.h>

#include <machine/paging.h>
#include <machine/pmap.h>
|
|
struct pdcache_info {
|
|
void * next;
|
|
};
|
|
_Static_assert(sizeof(struct pdcache_info) <= sizeof(struct vmpd));
|
|
|
|
/*
 * Global descriptor for the pd cache.  All fields are protected by 'lock'.
 */
struct pdcache_desc {
    struct pdcache_info * first;    // head of the free list (NULL when empty)
    unsigned long free;             // nodes currently on the free list
    unsigned long total;            // total nodes ever added to the cache
    Spinlock lock;                  // guards first/free/total
};

// The single file-scope cache instance; set up by pdcache_init().
static struct pdcache_desc pdc;
|
|
|
|
static int
|
|
pdcache_init_region(void * region, unsigned long len, struct pdcache_info ** outf, struct pdcache_info ** outl, unsigned long * free)
|
|
{
|
|
if (len < sizeof(struct vmpd)) {
|
|
return ENOMEM;
|
|
}
|
|
|
|
unsigned long count = 0;
|
|
struct pdcache_info * first = NULL, * last = NULL;
|
|
while (true) {
|
|
if (len < sizeof(struct vmpd)) {
|
|
break;
|
|
}
|
|
len = len - sizeof(struct vmpd);
|
|
|
|
struct pdcache_info * info = (struct pdcache_info *)(region + len);
|
|
|
|
if (last == NULL) {
|
|
last = info;
|
|
}
|
|
|
|
info->next = first;
|
|
first = info;
|
|
count++;
|
|
}
|
|
|
|
*outf = first;
|
|
*outl = last;
|
|
*free = count;
|
|
return 0;
|
|
}
|
|
|
|
static int
|
|
pdcache_grow()
|
|
{
|
|
ASSERT(Spinlock_IsHeld(&pdc.lock));
|
|
|
|
void * page = PAlloc_AllocPage();
|
|
if (page == NULL) {
|
|
return ENOMEM;
|
|
}
|
|
|
|
struct pdcache_info * first, * last;
|
|
unsigned long free;
|
|
int ret;
|
|
// initialize the page
|
|
if ((ret = pdcache_init_region(page, (1 << PGSHIFT), &first, &last, &free)) != 0) {
|
|
return ret;
|
|
}
|
|
|
|
pdc.free += free;
|
|
last->next = pdc.first;
|
|
pdc.first = first;
|
|
pdc.total += free;
|
|
|
|
kprintf("Growing pdcache: +%d nodes, total: %d nodes.\n", free, pdc.total);
|
|
|
|
return 0;
|
|
}
|
|
|
|
// allocates a new pd and returns its vaddr
|
|
struct vmpd *
|
|
pdcache_alloc()
|
|
{
|
|
Spinlock_Lock(&pdc.lock);
|
|
if (pdc.free == 0) {
|
|
// we ran out
|
|
if (pdcache_grow() != 0) {
|
|
return NULL;
|
|
}
|
|
}
|
|
|
|
ASSERT(pdc.free > 0 && pdc.first != NULL);
|
|
struct pdcache_info * cur = pdc.first;
|
|
pdc.first = cur->next;
|
|
Spinlock_Unlock(&pdc.lock);
|
|
|
|
memset(cur, 0, sizeof(struct vmpd));
|
|
|
|
return (struct vmpd *)cur;
|
|
}
|
|
|
|
// frees an existing pdcache
|
|
void
|
|
pdcache_free(struct vmpd * pd)
|
|
{
|
|
Spinlock_Lock(&pdc.lock);
|
|
struct pdcache_info * ret = (struct pdcache_info *)pd;
|
|
struct pdcache_info * old = pdc.first->next;
|
|
|
|
pdc.first = ret;
|
|
ret->next = old;
|
|
pdc.free++;
|
|
ASSERT(pdc.total >= pdc.free);
|
|
|
|
Spinlock_Unlock(&pdc.lock);
|
|
}
|
|
|
|
void
|
|
pdcache_init()
|
|
{
|
|
Spinlock_Init(&pdc.lock, "pdcache lock", SPINLOCK_TYPE_NORMAL);
|
|
pdc.first = NULL;
|
|
pdc.free = 0;
|
|
pdc.total = 0;
|
|
} |