/*
 * Copyright (c) 2013-2014 Stanford University
 * All rights reserved.
 */

#include <stdint.h>
#include <string.h>

#include <sys/queue.h>

/*
 * The kernel's own headers are also required here for Spinlock, XMem,
 * kprintf, Panic, ASSERT, REGISTER_DBGCMD, PGSIZE, ROUNDUP, and DMVA2PA.
 */

/* 'FREEPAGE' */
#define FREEPAGE_MAGIC_FREE  0x4652454550414745ULL
/* 'ALLOCATE' */
#define FREEPAGE_MAGIC_INUSE 0x414c4c4f43415445ULL

Spinlock pallocLock;
uint64_t totalPages;
uint64_t freePages;

typedef struct FreePage {
    uint64_t magic;
    LIST_ENTRY(FreePage) entries;
} FreePage;

typedef struct PageInfo {
    uint64_t refCount;
} PageInfo;

XMem *pageInfoXMem;
PageInfo *pageInfoTable;
uint64_t pageInfoLength;
LIST_HEAD(FreeListHead, FreePage) freeList;

/*
 * Initializes the page allocator.
 */
void
PAlloc_Init()
{
    totalPages = 0;
    freePages = 0;
    Spinlock_Init(&pallocLock, "PAlloc Lock");

    LIST_INIT(&freeList);

    pageInfoXMem = NULL;
    pageInfoTable = NULL;
}

/*
 * The late init call is made after the page tables are initialized using a
 * small boot memory region (the second 16 MB).  This is where we initialize
 * the XMem region that represents the PageInfo array and map memory into it.
 */
void
PAlloc_LateInit()
{
    void *pageInfoOld = pageInfoTable;

    pageInfoXMem = XMem_New();
    if (!XMem_Allocate(pageInfoXMem, pageInfoLength)) {
        Panic("Cannot back pageInfoTable!");
    }

    pageInfoTable = (PageInfo *)XMem_GetBase(pageInfoXMem);
    memcpy(pageInfoTable, pageInfoOld, pageInfoLength);

    // XXX: Free the old pages backing the boot-time table
}

/*
 * Adds a physical memory region to the page allocator.
 */
void
PAlloc_AddRegion(uintptr_t start, uintptr_t len)
{
    uintptr_t i;
    FreePage *pg;

    if ((start % PGSIZE) != 0)
        Panic("Region start is not page aligned!");
    if ((len % PGSIZE) != 0)
        Panic("Region length is not page aligned!");

    /*
     * The PageInfo table isn't initialized on the first call to this
     * function.  We must allocate a temporary table that will be copied into
     * the XMem region inside PAlloc_LateInit.
     *
     * Note that the PageInfo table is invalid for regions that are not added
     * to the free list, such as MMIO regions.
     */
    if (pageInfoTable == NULL) {
        // Physical address offsets
        uintptr_t base = (uintptr_t)DMVA2PA(start);
        uintptr_t end = base + len;

        pageInfoLength = ROUNDUP(end / PGSIZE * sizeof(PageInfo), PGSIZE);
        pageInfoTable = (PageInfo *)start;

        // Carve the table itself off the front of the region
        start += pageInfoLength;
        len -= pageInfoLength;

        // Pages below this region are not managed by the allocator
        for (i = 0; i < (base / PGSIZE); i++) {
            pageInfoTable[i].refCount = 1;
        }
        // Pages inside this region start out free
        for (i = (base / PGSIZE); i < (end / PGSIZE); i++) {
            pageInfoTable[i].refCount = 0;
        }
        // Pages holding the PageInfo table itself remain allocated
        for (i = 0; i < (pageInfoLength / PGSIZE); i++) {
            pageInfoTable[i + (base / PGSIZE)].refCount = 1;
        }
    } else {
        /*
         * Only the first call to AddRegion should occur before the XMem
         * region is initialized.
         */
        ASSERT(pageInfoXMem != NULL);

        uintptr_t base = (uintptr_t)DMVA2PA(start);
        uintptr_t end = base + len;
        uintptr_t newLength = ROUNDUP(end / PGSIZE * sizeof(PageInfo), PGSIZE);

        if (!XMem_Allocate(pageInfoXMem, newLength))
            Panic("Cannot allocate XMem region!");

        // Initialize the PageInfo entries for the new pages
        for (i = (base / PGSIZE); i < (end / PGSIZE); i++) {
            pageInfoTable[i].refCount = 0;
        }
    }

    Spinlock_Lock(&pallocLock);
    for (i = 0; i < len; i += PGSIZE) {
        pg = (void *)(start + i);
        pg->magic = FREEPAGE_MAGIC_FREE;

        totalPages++;
        freePages++;
        LIST_INSERT_HEAD(&freeList, pg, entries);
    }
    Spinlock_Unlock(&pallocLock);
}
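/*
 * Worked example of the sizing arithmetic in PAlloc_AddRegion above
 * (illustrative only; assumes a 4 KiB PGSIZE and the 8-byte PageInfo defined
 * in this file, and a hypothetical first region at physical 0x1000000 of
 * length 0x1000000):
 *
 *     base           = 0x1000000                        (16 MB)
 *     end            = 0x2000000                        (32 MB)
 *     end / PGSIZE   = 8192 page frames
 *     pageInfoLength = ROUNDUP(8192 * 8, PGSIZE) = 65536 bytes = 16 pages
 *
 * Those 16 pages are carved off the front of the region (start and len are
 * adjusted), and their refCount is set to 1 so they are never handed out by
 * PAlloc_AllocPage.
 */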
static inline PageInfo *
PAllocGetInfo(void *pg)
{
    uintptr_t entry = (uintptr_t)DMVA2PA(pg) / PGSIZE;
    return &pageInfoTable[entry];
}

/*
 * Allocates a page of physical memory and returns a pointer to it.
 */
void *
PAlloc_AllocPage()
{
    PageInfo *info;
    FreePage *pg;

    Spinlock_Lock(&pallocLock);

    pg = LIST_FIRST(&freeList);
    if (pg == NULL) {
        // Out of physical pages
        Spinlock_Unlock(&pallocLock);
        return NULL;
    }
    LIST_REMOVE(pg, entries);

    info = PAllocGetInfo(pg);
    ASSERT(info->refCount == 0);
    info->refCount++;

    ASSERT(pg->magic == FREEPAGE_MAGIC_FREE);
    pg->magic = FREEPAGE_MAGIC_INUSE;

    freePages--;
    Spinlock_Unlock(&pallocLock);

    return (void *)pg;
}

/*
 * Returns a page to the free list.  Called with pallocLock held (see
 * PAlloc_Release).
 */
void
PAllocFreePage(void *region)
{
    FreePage *pg = (FreePage *)region;

    LIST_INSERT_HEAD(&freeList, pg, entries);

#ifndef NDEBUG
    /*
     * The application could legitimately have written this magic value while
     * it owned the page, but in debug builds it still serves as a useful
     * double-free check.
     */
    ASSERT(pg->magic != FREEPAGE_MAGIC_FREE);

    PageInfo *info = PAllocGetInfo(pg);
    ASSERT(info->refCount == 0);
#endif

    pg->magic = FREEPAGE_MAGIC_FREE;
    freePages++;
}

/*
 * Increments a page's reference count.
 */
void
PAlloc_Retain(void *pg)
{
    PageInfo *info = PAllocGetInfo(pg);

    Spinlock_Lock(&pallocLock);
    ASSERT(info->refCount != 0);
    info->refCount++;
    Spinlock_Unlock(&pallocLock);
}

/*
 * Decrements a page's reference count and frees the page when the count
 * reaches zero.
 */
void
PAlloc_Release(void *pg)
{
    PageInfo *info = PAllocGetInfo(pg);

    Spinlock_Lock(&pallocLock);
    info->refCount--;
    if (info->refCount == 0)
        PAllocFreePage(pg);
    Spinlock_Unlock(&pallocLock);
}

void
Debug_PAllocStats(int argc, const char *argv[])
{
    kprintf("Total Pages: %llu\n", totalPages);
    kprintf("Allocated Pages: %llu\n", totalPages - freePages);
    kprintf("Free Pages: %llu\n", freePages);
}

REGISTER_DBGCMD(pallocstats, "Page allocator statistics", Debug_PAllocStats);

void
Debug_PAllocDump(int argc, const char *argv[])
{
    struct FreePage *it;

    LIST_FOREACH(it, &freeList, entries) {
        if (it->magic != FREEPAGE_MAGIC_FREE)
            kprintf("Magic Corrupted! (%lx)\n", it->magic);
        kprintf("Free %lx\n", (uintptr_t)it);
    }
}

REGISTER_DBGCMD(pallocdump, "Dump page allocator's free list", Debug_PAllocDump);
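/*
 * Illustrative usage sketch (not part of this file): the boot-time call
 * order this allocator expects, followed by a typical allocate/retain/
 * release sequence.  The region addresses are hypothetical, and DMPA2VA is
 * assumed here to be the direct-map inverse of DMVA2PA.
 *
 *   PAlloc_Init();
 *   PAlloc_AddRegion(DMPA2VA(0x1000000), 0x1000000);  // first region also holds pageInfoTable
 *   PAlloc_LateInit();                                 // move pageInfoTable into its XMem region
 *   PAlloc_AddRegion(DMPA2VA(0x4000000), 0x4000000);  // later regions extend the PageInfo table
 *
 *   void *pg = PAlloc_AllocPage();  // refCount: 0 -> 1
 *   if (pg != NULL) {
 *       PAlloc_Retain(pg);          // refCount: 1 -> 2
 *       PAlloc_Release(pg);         // refCount: 2 -> 1
 *       PAlloc_Release(pg);         // refCount: 1 -> 0, page returns to the free list
 *   }
 */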