Adapt the tsb_foreach interface to take a source and a destination pmap so
that it can be used for pmap_copy. Other consumers ignore the second pmap. Add statistics gathering for tsb_foreach. Implement pmap_copy.
commit 919f71f0fc
parent 7cc66bde35

Notes:
    svn2git
    2020-12-20 02:59:44 +00:00
    svn path=/head/; revision=91168
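For orientation, a minimal sketch of the adapted interface, drawn from the hunks below (the identifiers pm, start, end, src_pmap, dst_pmap, src_addr and len follow the patch): callbacks now receive both a source and a destination pmap, and consumers that only walk a single pmap pass NULL for the second argument.

    /* Callback type after this change: source pmap, destination pmap, TTE, VA. */
    typedef int (tsb_callback_t)(struct pmap *, struct pmap *, struct tte *,
        vm_offset_t);

    /* Single-pmap consumer (e.g. pmap_remove): the destination pmap is ignored. */
    tsb_foreach(pm, NULL, start, end, pmap_remove_tte);

    /* pmap_copy: walk the source TSB and enter copied TTEs into the destination. */
    tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len, pmap_copy_tte);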
@@ -73,14 +73,14 @@ tsb_kvtotte(vm_offset_t va)
         return (tsb_kvpntotte(va >> PAGE_SHIFT));
 }
 
-typedef int (tsb_callback_t)(struct pmap *, struct tte *, vm_offset_t);
+typedef int (tsb_callback_t)(struct pmap *, struct pmap *, struct tte *, vm_offset_t);
 
 struct tte *tsb_tte_lookup(pmap_t pm, vm_offset_t va);
 void tsb_tte_remove(struct tte *stp);
 struct tte *tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va,
             struct tte tte);
 void tsb_tte_local_remove(struct tte *tp);
-void tsb_foreach(pmap_t pm, vm_offset_t start, vm_offset_t end,
+void tsb_foreach(pmap_t pm1, pmap_t pm2, vm_offset_t start, vm_offset_t end,
             tsb_callback_t *callback);
 
 #endif /* !_MACHINE_TSB_H_ */
@@ -1186,8 +1186,8 @@ pmap_collect(void)
         pmap_pagedaemon_waken = 0;
 }
 
-int
-pmap_remove_tte(struct pmap *pm, struct tte *tp, vm_offset_t va)
+static int
+pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp, vm_offset_t va)
 {
         vm_page_t m;
 
@@ -1218,25 +1218,26 @@ void
 pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
 {
         struct tte *tp;
+        vm_offset_t va;
 
         CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx",
             pm->pm_context, start, end);
         if (PMAP_REMOVE_DONE(pm))
                 return;
         if (end - start > PMAP_TSB_THRESH)
-                tsb_foreach(pm, start, end, pmap_remove_tte);
+                tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
         else {
-                for (; start < end; start += PAGE_SIZE) {
-                        if ((tp = tsb_tte_lookup(pm, start)) != NULL) {
-                                if (!pmap_remove_tte(pm, tp, start))
+                for (va = start; va < end; va += PAGE_SIZE) {
+                        if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
+                                if (!pmap_remove_tte(pm, NULL, tp, va))
                                         break;
                         }
                 }
         }
 }
 
-int
-pmap_protect_tte(struct pmap *pm, struct tte *tp, vm_offset_t va)
+static int
+pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp, vm_offset_t va)
 {
         vm_page_t m;
         u_long data;
@@ -1274,6 +1275,7 @@ pmap_protect_tte(struct pmap *pm, struct tte *tp, vm_offset_t va)
 void
 pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 {
+        vm_offset_t va;
         struct tte *tp;
 
         CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx",
@@ -1291,11 +1293,11 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
                 return;
 
         if (eva - sva > PMAP_TSB_THRESH)
-                tsb_foreach(pm, sva, eva, pmap_protect_tte);
+                tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
         else {
-                for (; sva < eva; sva += PAGE_SIZE) {
-                        if ((tp = tsb_tte_lookup(pm, sva)) != NULL)
-                                pmap_protect_tte(pm, tp, sva);
+                for (va = sva; va < eva; va += PAGE_SIZE) {
+                        if ((tp = tsb_tte_lookup(pm, va)) != NULL)
+                                pmap_protect_tte(pm, NULL, tp, va);
                 }
         }
 }
@@ -1487,11 +1489,48 @@ pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
         }
 }
 
+static int
+pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, vm_offset_t va)
+{
+        struct tte tte;
+        vm_page_t m;
+
+        if (tsb_tte_lookup(dst_pmap, va) == NULL) {
+                tte.tte_data = tp->tte_data & ~(TD_PV | TD_REF | TD_CV | TD_W);
+                tte.tte_tag = TT_CTX(dst_pmap->pm_context) | TT_VA(va);
+                m = PHYS_TO_VM_PAGE(TD_GET_PA(tp->tte_data));
+                if ((tp->tte_data & TD_PV) != 0) {
+                        KASSERT((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0,
+                            ("pmap_enter: unmanaged pv page"));
+                        pv_insert(dst_pmap, m, va);
+                        tte.tte_data |= TD_PV;
+                        if (pmap_cache_enter(m, va) != 0)
+                                tte.tte_data |= TD_CV;
+                }
+                tsb_tte_enter(dst_pmap, m, va, tte);
+        }
+        return (1);
+}
+
 void
 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
           vm_size_t len, vm_offset_t src_addr)
 {
-        /* XXX */
+        struct tte *tp;
+        vm_offset_t va;
+
+        if (dst_addr != src_addr)
+                return;
+        if (len > PMAP_TSB_THRESH) {
+                tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len, pmap_copy_tte);
+                tlb_context_demap(dst_pmap->pm_context);
+        } else {
+                for (va = src_addr; va < src_addr + len; va += PAGE_SIZE) {
+                        if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL)
+                                pmap_copy_tte(src_pmap, dst_pmap, tp, va);
+                }
+                tlb_range_demap(dst_pmap->pm_context, src_addr, src_addr + len - 1);
+        }
 }
 
 /*
@@ -207,19 +207,20 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, struct tte tte)
  * an optimization.
  */
 void
-tsb_foreach(pmap_t pm, vm_offset_t start, vm_offset_t end,
+tsb_foreach(pmap_t pm1, pmap_t pm2, vm_offset_t start, vm_offset_t end,
             tsb_callback_t *callback)
 {
         vm_offset_t va;
         struct tte *tp;
         int i;
 
+        TSB_STATS_INC(tsb_nforeach);
         for (i = 0; i < TSB_SIZE; i++) {
-                tp = &pm->pm_tsb[i];
+                tp = &pm1->pm_tsb[i];
                 if ((tp->tte_data & TD_V) != 0) {
                         va = tte_get_va(*tp);
                         if (va >= start && va < end) {
-                                if (!callback(pm, tp, va))
+                                if (!callback(pm1, pm2, tp, va))
                                         break;
                         }
                 }