/*
 * Disk block cache: a fixed pool of BLOCKSIZE buffers carved from one XMem
 * region, indexed by a hash table and recycled through an LRU list.
 */
#include <stdbool.h>
|
|
#include <stdint.h>
|
|
#include <string.h>
|
|
|
|
#include <sys/kassert.h>
|
|
#include <sys/kdebug.h>
|
|
#include <sys/kmem.h>
|
|
#include <sys/spinlock.h>
|
|
#include <sys/disk.h>
|
|
#include <sys/diskcache.h>
|
|
|
|
Spinlock cacheLock;     // Protects hashTable, lruList, entry fields, and the stat counters
XMem *diskBuf;          // Backing memory region holding every cached block buffer

// Buckets of in-use entries, indexed by diskOffset % HASHTABLEENTRIES.
static TAILQ_HEAD(CacheHashTable, DiskCacheEntry) *hashTable;
// Entries with refCount == 0, oldest at the head; eviction source for DiskCacheAlloc.
static TAILQ_HEAD(LRUCacheList, DiskCacheEntry) lruList;
// Counters reported by the 'diskcache' debugger command.
static uint64_t cacheHit;
static uint64_t cacheMiss;
static uint64_t cacheAlloc;
static Slab cacheEntrySlab;     // Slab allocator backing DiskCacheEntry structures

DEFINE_SLAB(DiskCacheEntry, &cacheEntrySlab);

#define CACHESIZE (16*1024*1024)    // Total bytes of cached block data
#define HASHTABLEENTRIES 128        // Number of hash buckets
#define BLOCKSIZE (16*1024)         // Bytes per cached block (CACHESIZE/BLOCKSIZE entries)
|
|
|
void
|
|
DiskCache_Init()
|
|
{
|
|
int i;
|
|
|
|
Spinlock_Init(&cacheLock, "DiskCache Lock", SPINLOCK_TYPE_NORMAL);
|
|
|
|
diskBuf = XMem_New();
|
|
if (!diskBuf)
|
|
Panic("DiskCache: Cannot create XMem region\n");
|
|
|
|
if (!XMem_Allocate(diskBuf, CACHESIZE))
|
|
Panic("DiskCache: Cannot back XMem region\n");
|
|
|
|
TAILQ_INIT(&lruList);
|
|
|
|
hashTable = PAlloc_AllocPage();
|
|
if (!hashTable)
|
|
Panic("DiskCache: Cannot allocate hash table\n");
|
|
for (i = 0; i < HASHTABLEENTRIES; i++) {
|
|
TAILQ_INIT(&hashTable[i]);
|
|
}
|
|
|
|
Slab_Init(&cacheEntrySlab, "DiskCacheEntry Slab", sizeof(DiskCacheEntry), 16);
|
|
|
|
// Initialize cache
|
|
uintptr_t bufBase = XMem_GetBase(diskBuf);
|
|
for (i = 0; i < CACHESIZE/BLOCKSIZE; i++) {
|
|
DiskCacheEntry *e = DiskCacheEntry_Alloc();
|
|
if (!e) {
|
|
Panic("DiskCache: Cannot allocate cache entry\n");
|
|
}
|
|
|
|
memset(e, 0, sizeof(*e));
|
|
e->disk = NULL;
|
|
e->buffer = (void *)(bufBase + BLOCKSIZE * i);
|
|
TAILQ_INSERT_TAIL(&lruList, e, lruEntry);
|
|
}
|
|
|
|
cacheHit = 0;
|
|
cacheMiss = 0;
|
|
cacheAlloc = 0;
|
|
}
|
|
|
|
int
|
|
DiskCacheLookup(Disk *disk, uint64_t diskOffset, DiskCacheEntry **entry)
|
|
{
|
|
struct CacheHashTable *table;
|
|
DiskCacheEntry *e;
|
|
|
|
// Check hash table
|
|
table = &hashTable[diskOffset % HASHTABLEENTRIES];
|
|
TAILQ_FOREACH(e, table, htEntry) {
|
|
if (e->disk == disk && e->diskOffset == diskOffset) {
|
|
e->refCount++;
|
|
if (e->refCount == 1) {
|
|
TAILQ_REMOVE(&lruList, e, lruEntry);
|
|
}
|
|
*entry = e;
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
*entry = NULL;
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
DiskCacheAlloc(Disk *disk, uint64_t diskOffset, DiskCacheEntry **entry)
|
|
{
|
|
struct CacheHashTable *table;
|
|
DiskCacheEntry *e;
|
|
|
|
// Allocate from LRU list
|
|
e = TAILQ_FIRST(&lruList);
|
|
if (e == NULL) {
|
|
kprintf("DiskCache: No space left!\n");
|
|
return -1;
|
|
}
|
|
TAILQ_REMOVE(&lruList, e, lruEntry);
|
|
|
|
// Remove from hash table
|
|
if (e->disk != NULL) {
|
|
table = &hashTable[e->diskOffset % HASHTABLEENTRIES];
|
|
TAILQ_REMOVE(table, e, htEntry);
|
|
}
|
|
|
|
// Initialize
|
|
e->disk = disk;
|
|
e->diskOffset = diskOffset;
|
|
e->refCount = 1;
|
|
|
|
// Reinsert into hash table
|
|
table = &hashTable[diskOffset % HASHTABLEENTRIES];
|
|
TAILQ_INSERT_HEAD(table, e, htEntry);
|
|
*entry = e;
|
|
|
|
return 0;
|
|
}
|
|
|
|
int
|
|
DiskCache_Alloc(Disk *disk, uint64_t diskOffset, DiskCacheEntry **entry)
|
|
{
|
|
int status;
|
|
|
|
Spinlock_Lock(&cacheLock);
|
|
|
|
status = DiskCacheLookup(disk, diskOffset, entry);
|
|
if (*entry == NULL) {
|
|
status = DiskCacheAlloc(disk, diskOffset, entry);
|
|
}
|
|
|
|
cacheAlloc++;
|
|
|
|
Spinlock_Unlock(&cacheLock);
|
|
|
|
return status;
|
|
}
|
|
|
|
void
|
|
DiskCache_Release(DiskCacheEntry *entry)
|
|
{
|
|
Spinlock_Lock(&cacheLock);
|
|
|
|
entry->refCount--;
|
|
if (entry->refCount == 0) {
|
|
TAILQ_INSERT_TAIL(&lruList, entry, lruEntry);
|
|
}
|
|
|
|
Spinlock_Unlock(&cacheLock);
|
|
}
|
|
|
|
/*
 * DiskCache_Read --
 *
 *      Return a referenced cache entry holding the block at diskOffset on
 *      disk, reading it from the device on a miss.  On a hit the cached
 *      contents are returned without touching the disk.  Returns 0 on
 *      success; non-zero comes from cache exhaustion (-1) or from
 *      Disk_Read.
 *
 *      NOTE(review): if Disk_Read fails, the freshly allocated entry is
 *      left in the hash table with refCount == 1 and undefined buffer
 *      contents — confirm callers release it on error.
 */
int
DiskCache_Read(Disk *disk, uint64_t diskOffset, DiskCacheEntry **entry)
{
    int status;
    void *buf;
    SGArray sga;

    Spinlock_Lock(&cacheLock);
    status = DiskCacheLookup(disk, diskOffset, entry);
    if (*entry != NULL) {
        // Hit: lookup already took a reference on the entry.
        cacheHit++;
        Spinlock_Unlock(&cacheLock);
        return status;
    }
    cacheMiss++;

    status = DiskCacheAlloc(disk, diskOffset, entry);
    if (status != 0) {
        Spinlock_Unlock(&cacheLock);
        return status;
    }

    // Miss: read one BLOCKSIZE block from the device into the new
    // entry's buffer via a single-element scatter/gather array.
    buf = (*entry)->buffer;
    SGArray_Init(&sga);
    SGArray_Append(&sga, diskOffset, BLOCKSIZE);

    /*
     * XXX: Need to avoid holding cacheLock while reading from the disk, but
     * need ensure other cores doesn't issue a read to the same block.
     */
    status = Disk_Read(disk, buf, &sga, NULL, NULL);
    Spinlock_Unlock(&cacheLock);

    return status;
}
|
|
|
|
int
|
|
DiskCache_Write(DiskCacheEntry *entry)
|
|
{
|
|
void *buf = entry->buffer;
|
|
SGArray sga;
|
|
|
|
SGArray_Init(&sga);
|
|
SGArray_Append(&sga, entry->diskOffset, BLOCKSIZE);
|
|
|
|
return Disk_Write(entry->disk, buf, &sga, NULL, NULL);
|
|
}
|
|
|
|
void
|
|
Debug_DiskCache(int argc, const char *argv[])
|
|
{
|
|
kprintf("Hits: %lld\n", cacheHit);
|
|
kprintf("Misses: %lld\n", cacheMiss);
|
|
kprintf("Allocations: %lld\n", cacheAlloc);
|
|
}
|
|
|
|
// Expose the statistics dump as the 'diskcache' kernel debugger command.
REGISTER_DBGCMD(diskcache, "Display disk cache statistics", Debug_DiskCache);