From f7c81a5182e30bbb706c8825d3e17ee53c583674 Mon Sep 17 00:00:00 2001 From: Jake Burkholder Date: Mon, 20 May 2002 16:10:17 +0000 Subject: [PATCH] De-inline the tlb demap functions. These were so big that gcc3.1 refused to inline them anyway. ;) --- sys/conf/files.sparc64 | 1 + sys/sparc64/include/tlb.h | 103 +--------------------------- sys/sparc64/sparc64/tlb.c | 140 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 144 insertions(+), 100 deletions(-) create mode 100644 sys/sparc64/sparc64/tlb.c diff --git a/sys/conf/files.sparc64 b/sys/conf/files.sparc64 index e6ddeb0b3a00..de7071933f2f 100644 --- a/sys/conf/files.sparc64 +++ b/sys/conf/files.sparc64 @@ -63,6 +63,7 @@ sparc64/sparc64/support.s standard sparc64/sparc64/sys_machdep.c standard sparc64/sparc64/swtch.s standard sparc64/sparc64/tick.c standard +sparc64/sparc64/tlb.c standard sparc64/sparc64/trap.c standard sparc64/sparc64/tsb.c standard sparc64/sparc64/vm_machdep.c standard diff --git a/sys/sparc64/include/tlb.h b/sys/sparc64/include/tlb.h index 9a84de0275f3..2a4693329291 100644 --- a/sys/sparc64/include/tlb.h +++ b/sys/sparc64/include/tlb.h @@ -83,106 +83,9 @@ extern int kernel_tlb_slots; extern struct tte *kernel_ttes; -/* - * Some tlb operations must be atomic, so no interrupt or trap can be allowed - * while they are in progress. Traps should not happen, but interrupts need to - * be explicitely disabled. critical_enter() cannot be used here, since it only - * disables soft interrupts. - */ - -static __inline void -tlb_context_demap(struct pmap *pm) -{ - void *cookie; - u_long s; - - /* - * It is important that we are not interrupted or preempted while - * doing the IPIs. The interrupted CPU may hold locks, and since - * it will wait for the CPU that sent the IPI, this can lead - * to a deadlock when an interrupt comes in on that CPU and it's - * handler tries to grab one of that locks. 
This will only happen for - * spin locks, but these IPI types are delivered even if normal - * interrupts are disabled, so the lock critical section will not - * protect the target processor from entering the IPI handler with - * the lock held. - */ - critical_enter(); - cookie = ipi_tlb_context_demap(pm); - if (pm->pm_active & PCPU_GET(cpumask)) { - KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1, - ("tlb_context_demap: inactive pmap?")); - s = intr_disable(); - stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0); - stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0); - membar(Sync); - intr_restore(s); - } - ipi_wait(cookie); - critical_exit(); -} - -static __inline void -tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va) -{ - u_long flags; - void *cookie; - u_long s; - - critical_enter(); - cookie = ipi_tlb_page_demap(tlb, pm, va); - if (pm->pm_active & PCPU_GET(cpumask)) { - KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1, - ("tlb_page_demap: inactive pmap?")); - if (pm == kernel_pmap) - flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE; - else - flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE; - - s = intr_disable(); - if (tlb & TLB_DTLB) { - stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0); - membar(Sync); - } - if (tlb & TLB_ITLB) { - stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0); - membar(Sync); - } - intr_restore(s); - } - ipi_wait(cookie); - critical_exit(); -} - -static __inline void -tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end) -{ - vm_offset_t va; - void *cookie; - u_long flags; - u_long s; - - critical_enter(); - cookie = ipi_tlb_range_demap(pm, start, end); - if (pm->pm_active & PCPU_GET(cpumask)) { - KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1, - ("tlb_range_demap: inactive pmap?")); - if (pm == kernel_pmap) - flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE; - else - flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE; - - s = intr_disable(); - for (va = start; va < end; va += PAGE_SIZE) { - stxa(TLB_DEMAP_VA(va) | 
flags, ASI_DMMU_DEMAP, 0); - stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0); - membar(Sync); - } - intr_restore(s); - } - ipi_wait(cookie); - critical_exit(); -} +void tlb_context_demap(struct pmap *pm); +void tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va); +void tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end); #define tlb_tte_demap(tte, pm) \ tlb_page_demap(TD_GET_TLB((tte).tte_data), pm, \ diff --git a/sys/sparc64/sparc64/tlb.c b/sys/sparc64/sparc64/tlb.c new file mode 100644 index 000000000000..f2ecf02a944e --- /dev/null +++ b/sys/sparc64/sparc64/tlb.c @@ -0,0 +1,140 @@ +/*- + * Copyright (c) 2001 Jake Burkholder. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +/* + * Some tlb operations must be atomic, so no interrupt or trap can be allowed + * while they are in progress. Traps should not happen, but interrupts need to + * be explicitly disabled. critical_enter() cannot be used here, since it only + * disables soft interrupts. + */ + +void +tlb_context_demap(struct pmap *pm) +{ + void *cookie; + u_long s; + + /* + * It is important that we are not interrupted or preempted while + * doing the IPIs. The interrupted CPU may hold locks, and since + * it will wait for the CPU that sent the IPI, this can lead + * to a deadlock when an interrupt comes in on that CPU and its + * handler tries to grab one of those locks. This will only happen for + * spin locks, but these IPI types are delivered even if normal + * interrupts are disabled, so the lock critical section will not + * protect the target processor from entering the IPI handler with + * the lock held. 
+ */ + critical_enter(); + cookie = ipi_tlb_context_demap(pm); + if (pm->pm_active & PCPU_GET(cpumask)) { + KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1, + ("tlb_context_demap: inactive pmap?")); + s = intr_disable(); + stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0); + stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0); + membar(Sync); + intr_restore(s); + } + ipi_wait(cookie); + critical_exit(); +} + +void +tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va) +{ + u_long flags; + void *cookie; + u_long s; + + critical_enter(); + cookie = ipi_tlb_page_demap(tlb, pm, va); + if (pm->pm_active & PCPU_GET(cpumask)) { + KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1, + ("tlb_page_demap: inactive pmap?")); + if (pm == kernel_pmap) + flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE; + else + flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE; + + s = intr_disable(); + if (tlb & TLB_DTLB) { + stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0); + membar(Sync); + } + if (tlb & TLB_ITLB) { + stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0); + membar(Sync); + } + intr_restore(s); + } + ipi_wait(cookie); + critical_exit(); +} + +void +tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end) +{ + vm_offset_t va; + void *cookie; + u_long flags; + u_long s; + + critical_enter(); + cookie = ipi_tlb_range_demap(pm, start, end); + if (pm->pm_active & PCPU_GET(cpumask)) { + KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1, + ("tlb_range_demap: inactive pmap?")); + if (pm == kernel_pmap) + flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE; + else + flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE; + + s = intr_disable(); + for (va = start; va < end; va += PAGE_SIZE) { + stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0); + stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0); + membar(Sync); + } + intr_restore(s); + } + ipi_wait(cookie); + critical_exit(); +}