- Add an optimized page copy function for use by pmap_copy_page(). It is
  roughly four times faster than bcopy() for uncached pages.
- Sort the function prototypes in md_var.h.
Alan Cox 2004-03-31 02:03:49 +00:00
parent db48c0d254
commit c64b70130e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=127653
3 changed files with 30 additions and 3 deletions
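Why the speedup: bcopy() moves data through the cache, so copying an uncached page drags both the source and the destination through it, displacing up to two pages' worth of previously cached data. The new routine instead pulls the source in with a non-temporal prefetch hint (prefetchnta) and writes the destination with non-temporal stores (movnti), which bypass the cache entirely; the "roughly four times faster" figure is the commit's own claim for that uncached case.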

@@ -2463,7 +2463,7 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
 	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
 
-	bcopy((void *)src, (void *)dst, PAGE_SIZE);
+	pagecopy((void *)src, (void *)dst);
 }
 
 /*
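The hunk above leans on the amd64 direct map: all physical memory stays permanently mapped at a fixed kernel virtual offset, so PHYS_TO_DMAP() is a single address computation and neither page needs a temporary mapping before the copy. A minimal sketch of the idea, with a hypothetical DMAP_BASE standing in for the kernel's real direct-map base constant:

	/* Illustration only, not the kernel's actual macro. */
	#define	DMAP_BASE		0xffffff8000000000UL	/* hypothetical value */
	#define	PHYS_TO_DMAP_SKETCH(pa)	((vm_offset_t)(pa) + DMAP_BASE)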

@@ -167,6 +167,32 @@ ENTRY(memcpy)
 	movsb
 	ret
 
+/*
+ * pagecopy(%rdi=from, %rsi=to)
+ */
+ENTRY(pagecopy)
+	movq	$-PAGE_SIZE,%rax
+	movq	%rax,%rdx
+	subq	%rax,%rdi
+	subq	%rax,%rsi
+1:
+	prefetchnta (%rdi,%rax)
+	addq	$64,%rax
+	jne	1b
+2:
+	movq	(%rdi,%rdx),%rax
+	movnti	%rax,(%rsi,%rdx)
+	movq	8(%rdi,%rdx),%rax
+	movnti	%rax,8(%rsi,%rdx)
+	movq	16(%rdi,%rdx),%rax
+	movnti	%rax,16(%rsi,%rdx)
+	movq	24(%rdi,%rdx),%rax
+	movnti	%rax,24(%rsi,%rdx)
+	addq	$32,%rdx
+	jne	2b
+	sfence
+	ret
+
 /* fillw(pat, base, cnt) */
 /* %rdi,%rsi, %rdx */
 ENTRY(fillw)
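Two details in the new routine are worth spelling out. Both loops use a negative-index idiom: %rax and %rdx start at -PAGE_SIZE, and subtracting that negative value from %rdi and %rsi biases both pointers one page past their starts, so (%rdi,%rax) initially addresses the first byte of the page; each addq then walks the index up toward zero, and because addq sets ZF on a zero result, the jne doubles as the loop test with no separate cmp. The first loop touches each 64-byte cache line of the source with prefetchnta; the second copies 32 bytes per iteration with plain movq loads and non-temporal movnti stores; the closing sfence orders those weakly-ordered stores before the function returns.

The same technique in C with SSE2 intrinsics, as an illustration rather than a drop-in replacement for the assembly; pagecopy_sketch, the fixed PAGE_SIZE, and the plain forward loops are assumptions of this sketch:

	#include <stddef.h>		/* size_t */
	#include <xmmintrin.h>		/* _mm_prefetch(), _mm_sfence() */
	#include <emmintrin.h>		/* _mm_stream_si64() (SSE2, 64-bit targets) */

	#define	PAGE_SIZE	4096	/* assumed amd64 base page size */

	static void
	pagecopy_sketch(const void *from, void *to)
	{
		const long long *src = from;
		long long *dst = to;
		size_t i;

		/* Pass 1: hint each 64-byte cache line of the source in
		 * with a non-temporal prefetch (prefetchnta). */
		for (i = 0; i < PAGE_SIZE; i += 64)
			_mm_prefetch((const char *)from + i, _MM_HINT_NTA);

		/* Pass 2: copy 32 bytes per iteration; _mm_stream_si64
		 * compiles to movnti, so the destination bypasses the
		 * caches instead of displacing their contents. */
		for (i = 0; i < PAGE_SIZE / sizeof(*src); i += 4) {
			_mm_stream_si64(&dst[i + 0], src[i + 0]);
			_mm_stream_si64(&dst[i + 1], src[i + 1]);
			_mm_stream_si64(&dst[i + 2], src[i + 2]);
			_mm_stream_si64(&dst[i + 3], src[i + 3]);
		}

		/* Make the weakly-ordered stores globally visible. */
		_mm_sfence();
	}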

@@ -65,11 +65,12 @@ void doreti_iret(void) __asm(__STRING(doreti_iret));
 void doreti_iret_fault(void) __asm(__STRING(doreti_iret_fault));
 void	enable_sse(void);
 void	fillw(int /*u_short*/ pat, void *base, size_t cnt);
-void	pagezero(void *addr);
+void	fpstate_drop(struct thread *td);
 int	is_physical_memory(vm_offset_t addr);
 int	isa_nmi(int cd);
+void	pagecopy(void *from, void *to);
+void	pagezero(void *addr);
 void	setidt(int idx, alias_for_inthand_t *func, int typ, int dpl, int ist);
 int	user_dbreg_trap(void);
-void	fpstate_drop(struct thread *td);
 
 #endif /* !_MACHINE_MD_VAR_H_ */