Request a CPU-private mapping from sf_buf_alloc(). If the swap-backed
memory disk is larger than the number of available sf_bufs, this improves performance on SMPs by eliminating interprocessor TLB shootdowns. For example, with 6656 sf_bufs (the default on my test machine) and a 256MB swap-backed memory disk, I see the command "dd if=/dev/md0 of=/dev/null bs=64k" achieve ~489MB/sec with the default shared mappings and ~587MB/sec with CPU-private mappings.
parent 4f2d08a331
commit 13e88b41ba
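As a rough, self-contained sketch of the pattern the diff below applies in mdstart_swap(): the helper function and its name are hypothetical, assumed only for illustration, while sched_pin()/sched_unpin(), sf_buf_alloc() with SFB_CPUPRIVATE, sf_buf_kva(), and sf_buf_free() are the existing kernel interfaces the change relies on.

#include <sys/param.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Illustrative only: map one page with a CPU-private sf_buf, touch it
 * through its kernel virtual address, and release it.  Pinning the
 * thread to its current CPU is what makes SFB_CPUPRIVATE safe, and it
 * is also why recycling the sf_buf later requires no interprocessor
 * TLB shootdown IPIs.
 */
static void
touch_page_cpuprivate(vm_page_t m)
{
	struct sf_buf *sf;

	sched_pin();				/* stay on this CPU */
	sf = sf_buf_alloc(m, SFB_CPUPRIVATE);	/* per-CPU mapping of m */
	/* ... read or write the page via (char *)sf_buf_kva(sf) ... */
	sf_buf_free(sf);			/* drop the mapping */
	sched_unpin();				/* allow migration again */
}

The sched_pin()/sched_unpin() bracket must enclose every use of the sf_buf, which is why the diff adds sched_unpin() after each sf_buf_free() call, including the error paths.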
@@ -552,13 +552,15 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 		m = vm_page_grab(sc->object, i,
 		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
 		VM_OBJECT_UNLOCK(sc->object);
-		sf = sf_buf_alloc(m, 0);
+		sched_pin();
+		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
 		VM_OBJECT_LOCK(sc->object);
 		if (bp->bio_cmd == BIO_READ) {
 			if (m->valid != VM_PAGE_BITS_ALL)
 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
 			if (rv == VM_PAGER_ERROR) {
 				sf_buf_free(sf);
+				sched_unpin();
 				vm_page_lock_queues();
 				vm_page_wakeup(m);
 				vm_page_unlock_queues();
@@ -570,6 +572,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
 			if (rv == VM_PAGER_ERROR) {
 				sf_buf_free(sf);
+				sched_unpin();
 				vm_page_lock_queues();
 				vm_page_wakeup(m);
 				vm_page_unlock_queues();
@@ -583,6 +586,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
 			if (rv == VM_PAGER_ERROR) {
 				sf_buf_free(sf);
+				sched_unpin();
 				vm_page_lock_queues();
 				vm_page_wakeup(m);
 				vm_page_unlock_queues();
@@ -594,6 +598,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 #endif
 		}
 		sf_buf_free(sf);
+		sched_unpin();
 		vm_page_lock_queues();
 		vm_page_wakeup(m);
 		vm_page_activate(m);