These were repo-copied to have a .S extension.

Jake Burkholder 2002-07-31 15:56:15 +00:00
parent 5c153c5bb0
commit da1416c80c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=101072
6 changed files with 0 additions and 4398 deletions

File diff suppressed because it is too large


@@ -1,183 +0,0 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/syscall.h>
#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/pstate.h>
#include <machine/upa.h>
#include "assym.s"
.register %g2, #ignore
.globl kernbase
.set kernbase,KERNBASE
/*
* void _start(caddr_t metadata, u_long o1, u_long o2, u_long o3,
* u_long ofw_vec)
*/
ENTRY(btext)
ENTRY(_start)
/*
* Initialize misc state to known values. Interrupts disabled, normal
* globals, windows flushed (cr = 0, cs = nwindows - 1), no clean
* windows, pil 0, and floating point disabled.
*/
wrpr %g0, PSTATE_NORMAL, %pstate
flushw
wrpr %g0, 0, %cleanwin
wrpr %g0, 0, %pil
wr %g0, 0, %fprs
/*
* Get onto our per-cpu panic stack, which precedes the struct pcpu in
* the per-cpu page.
*/
SET(pcpu0 + (PCPU_PAGES * PAGE_SIZE) - PC_SIZEOF, %l1, %l0)
sub %l0, SPOFF + CCFSZ, %sp
/*
* Enable interrupts.
*/
wrpr %g0, PSTATE_KERNEL, %pstate
/*
* Do initial bootstrap to setup pmap and thread0.
*/
call sparc64_init
nop
/*
* Get onto thread0's kstack.
*/
sub PCB_REG, SPOFF + CCFSZ, %sp
/*
* And away we go. This doesn't return.
*/
call mi_startup
nop
sir
! NOTREACHED
END(_start)
/*
* void cpu_setregs(struct pcpu *pc)
*/
ENTRY(cpu_setregs)
ldx [%o0 + PC_CURPCB], %o1
/*
* Disable interrupts, normal globals.
*/
wrpr %g0, PSTATE_NORMAL, %pstate
/*
* Normal %g6 points to the current thread's pcb, and %g7 points to
* the per-cpu data structure.
*/
mov %o1, PCB_REG
mov %o0, PCPU_REG
/*
* Alternate globals.
*/
wrpr %g0, PSTATE_ALT, %pstate
/*
* Alternate %g5 points to a per-cpu panic stack, %g6 points to the
* current thread's pcb, and %g7 points to the per-cpu data structure.
*/
mov %o0, ASP_REG
mov %o1, PCB_REG
mov %o0, PCPU_REG
/*
* Interrupt globals.
*/
wrpr %g0, PSTATE_INTR, %pstate
/*
* Interrupt %g7 points to the per-cpu data structure.
*/
mov %o0, PCPU_REG
/*
* MMU globals.
*/
wrpr %g0, PSTATE_MMU, %pstate
/*
* MMU %g7 points to the user tsb. Initialize it to something sane
* here to catch invalid use.
*/
mov %g0, TSB_REG
/*
* Normal globals again.
*/
wrpr %g0, PSTATE_NORMAL, %pstate
/*
* Force trap level 1 and take over the trap table.
*/
SET(tl0_base, %o2, %o1)
wrpr %g0, 1, %tl
wrpr %o1, 0, %tba
/*
* Re-enable interrupts.
*/
wrpr %g0, PSTATE_KERNEL, %pstate
retl
nop
END(cpu_setregs)
/*
* Signal trampoline, copied out to user stack. Must be 16 byte aligned or
* the argv and envp pointers can become misaligned.
*/
ENTRY(sigcode)
call %o4
nop
add %sp, SPOFF + CCFSZ + SF_UC, %o0
mov SYS_sigreturn, %g1
ta %xcc, 9
mov SYS_exit, %g1
ta %xcc, 9
illtrap
.align 16
esigcode:
END(sigcode)
DATA(szsigcode)
.long esigcode - sigcode
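The trampoline above is not executed in place: the kernel copies sigcode onto the user stack at signal delivery, and szsigcode tells it how many bytes to copy. A minimal C sketch of that step, where install_sigcode() and ustack_top are illustrative names and memcpy() stands in for the copyout() the real machine-dependent sendsig() would use:

#include <string.h>

extern char sigcode[];          /* start of the trampoline above */
extern int szsigcode;           /* its length: esigcode - sigcode */

/* Hypothetical helper: place the trampoline at the top of the user stack. */
static void *
install_sigcode(char *ustack_top)
{
        /* Keep 16-byte alignment, as the comment above requires. */
        char *dst = (char *)((unsigned long)(ustack_top - szsigcode) & ~15UL);

        memcpy(dst, sigcode, szsigcode);   /* the kernel would use copyout() */
        return (dst);
}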


@@ -1,245 +0,0 @@
/*-
* Copyright (c) 2002 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asi.h>
#include <machine/ktr.h>
#include <machine/asmacros.h>
#include <machine/pstate.h>
#include "assym.s"
.register %g2, #ignore
.register %g3, #ignore
#define IPI_WAIT(r1, r2, r3, r4) \
lduw [PCPU(CPUMASK)], r4 ; \
ATOMIC_CLEAR_INT(r1, r2, r3, r4) ; \
9: lduw [r1], r2 ; \
brnz,a,pn r2, 9b ; \
nop
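IPI_WAIT implements a rendezvous: each target CPU clears its own bit in a shared acknowledgement word and then spins until every bit is clear, so all targets leave the handler together. The same protocol in C11 atomics (ipi_wait(), mask, and self are illustrative names; the word lives at the head of the IPI argument block that %g5 points to):

#include <stdatomic.h>

static void
ipi_wait(atomic_uint *mask, unsigned int self)
{
        atomic_fetch_and(mask, ~self);  /* acknowledge: clear our cpu bit */
        while (atomic_load(mask) != 0)  /* spin until every target has acked */
                ;
}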
/*
* Invalidate a physical page in the data cache.
*/
ENTRY(tl_ipi_dcache_page_inval)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "ipi_dcache_page_inval: pa=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
ldx [%g5 + ICA_PA], %g2
stx %g2, [%g1 + KTR_PARM1]
9:
#endif
ldx [%g5 + ICA_PA], %g6
srlx %g6, PAGE_SHIFT - DC_TAG_SHIFT, %g6
SET(cache, %g3, %g2)
lduw [%g2 + DC_SIZE], %g3
lduw [%g2 + DC_LINESIZE], %g4
sub %g3, %g4, %g2
1: ldxa [%g2] ASI_DCACHE_TAG, %g1
srlx %g1, DC_VALID_SHIFT, %g3
andcc %g3, DC_VALID_MASK, %g0
bz,pt %xcc, 2f
set DC_TAG_MASK, %g3
sllx %g3, DC_TAG_SHIFT, %g3
and %g1, %g3, %g1
cmp %g1, %g6
bne,a,pt %xcc, 2f
nop
stxa %g1, [%g2] ASI_DCACHE_TAG
membar #Sync
2: brgz,pt %g2, 1b
sub %g2, %g4, %g2
IPI_WAIT(%g5, %g1, %g2, %g3)
retry
END(tl_ipi_dcache_page_inval)
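The loop above walks the direct-mapped data cache tag by tag, from the last line down to line 0, and writes back any valid tag that matches the target physical page, clearing its valid bits. The same walk in C, where dc_read_tag()/dc_write_tag() are hypothetical stand-ins for the ldxa/stxa ASI_DCACHE_TAG accesses and PAGE_SHIFT and the DC_* constants are assumed from the machine headers:

extern unsigned long dc_read_tag(unsigned long va);  /* ldxa ASI_DCACHE_TAG */
extern void dc_write_tag(unsigned long va, unsigned long tag);

static void
dcache_page_inval_sketch(unsigned long pa, long dc_size, long dc_linesize)
{
        unsigned long target, tag;
        long va;

        target = pa >> (PAGE_SHIFT - DC_TAG_SHIFT);
        for (va = dc_size - dc_linesize; va >= 0; va -= dc_linesize) {
                tag = dc_read_tag(va);
                if (((tag >> DC_VALID_SHIFT) & DC_VALID_MASK) == 0)
                        continue;
                tag &= DC_TAG_MASK << DC_TAG_SHIFT;
                if (tag == target)
                        dc_write_tag(va, tag);  /* valid bits now zero */
        }
}

tl_ipi_icache_page_inval below is the same loop run against the instruction cache tags with the IC_* constants.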
/*
* Invalidate a physical page in the instruction cache.
*/
ENTRY(tl_ipi_icache_page_inval)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "ipi_icache_page_inval: pa=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
ldx [%g5 + ICA_PA], %g2
stx %g2, [%g1 + KTR_PARM1]
9:
#endif
ldx [%g5 + ICA_PA], %g6
srlx %g6, PAGE_SHIFT - IC_TAG_SHIFT, %g6
SET(cache, %g3, %g2)
lduw [%g2 + IC_SIZE], %g3
lduw [%g2 + IC_LINESIZE], %g4
sub %g3, %g4, %g2
1: ldda [%g2] ASI_ICACHE_TAG, %g0 /*, %g1 */
srlx %g1, IC_VALID_SHIFT, %g3
andcc %g3, IC_VALID_MASK, %g0
bz,pt %xcc, 2f
set IC_TAG_MASK, %g3
sllx %g3, IC_TAG_SHIFT, %g3
and %g1, %g3, %g1
cmp %g1, %g6
bne,a,pt %xcc, 2f
nop
stxa %g1, [%g2] ASI_ICACHE_TAG
membar #Sync
2: brgz,pt %g2, 1b
sub %g2, %g4, %g2
IPI_WAIT(%g5, %g1, %g2, %g3)
retry
END(tl_ipi_icache_page_inval)
/*
* Trigger a softint at the desired level.
*/
ENTRY(tl_ipi_level)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "tl_ipi_level: cpuid=%d mid=%d d1=%#lx d2=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
lduw [PCPU(CPUID)], %g2
stx %g2, [%g1 + KTR_PARM1]
lduw [PCPU(MID)], %g2
stx %g2, [%g1 + KTR_PARM2]
stx %g4, [%g1 + KTR_PARM3]
stx %g5, [%g1 + KTR_PARM4]
9:
#endif
mov 1, %g1
sllx %g1, %g5, %g1
wr %g1, 0, %asr20
retry
END(tl_ipi_level)
/*
* Demap a page from the dtlb and/or itlb.
*/
ENTRY(tl_ipi_tlb_page_demap)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "ipi_tlb_page_demap: pm=%p va=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
ldx [%g5 + ITA_PMAP], %g2
stx %g2, [%g1 + KTR_PARM1]
ldx [%g5 + ITA_VA], %g2
stx %g2, [%g1 + KTR_PARM2]
9:
#endif
ldx [%g5 + ITA_PMAP], %g1
SET(kernel_pmap_store, %g3, %g2)
mov TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, %g3
cmp %g1, %g2
movne %xcc, TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE, %g3
ldx [%g5 + ITA_VA], %g2
or %g2, %g3, %g2
stxa %g0, [%g2] ASI_DMMU_DEMAP
stxa %g0, [%g2] ASI_IMMU_DEMAP
membar #Sync
IPI_WAIT(%g5, %g1, %g2, %g3)
retry
END(tl_ipi_tlb_page_demap)
/*
* Demap a range of pages from the dtlb and itlb.
*/
ENTRY(tl_ipi_tlb_range_demap)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "ipi_tlb_range_demap: pm=%p start=%#lx end=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
ldx [%g5 + ITA_PMAP], %g2
stx %g2, [%g1 + KTR_PARM1]
ldx [%g5 + ITA_START], %g2
stx %g2, [%g1 + KTR_PARM2]
ldx [%g5 + ITA_END], %g2
stx %g2, [%g1 + KTR_PARM3]
9:
#endif
ldx [%g5 + ITA_PMAP], %g1
SET(kernel_pmap_store, %g3, %g2)
mov TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, %g3
cmp %g1, %g2
movne %xcc, TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE, %g3
ldx [%g5 + ITA_START], %g1
ldx [%g5 + ITA_END], %g2
set PAGE_SIZE, %g6
1: or %g1, %g3, %g4
stxa %g0, [%g4] ASI_DMMU_DEMAP
stxa %g0, [%g4] ASI_IMMU_DEMAP
membar #Sync
add %g1, %g6, %g1
cmp %g1, %g2
blt,a,pt %xcc, 1b
nop
IPI_WAIT(%g5, %g1, %g2, %g3)
retry
END(tl_ipi_tlb_range_demap)
/*
* Demap the primary context from the dtlb and itlb.
*/
ENTRY(tl_ipi_tlb_context_demap)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "ipi_tlb_page_demap: pm=%p va=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
ldx [%g5 + ITA_PMAP], %g2
stx %g2, [%g1 + KTR_PARM1]
ldx [%g5 + ITA_VA], %g2
stx %g2, [%g1 + KTR_PARM2]
9:
#endif
mov TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, %g1
stxa %g0, [%g1] ASI_DMMU_DEMAP
stxa %g0, [%g1] ASI_IMMU_DEMAP
membar #Sync
IPI_WAIT(%g5, %g1, %g2, %g3)
retry
END(tl_ipi_tlb_context_demap)


@@ -1,192 +0,0 @@
/*-
* Copyright (c) 2002 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/ktr.h>
#include <machine/pstate.h>
#include <machine/upa.h>
#include "assym.s"
.register %g2, #ignore
.register %g3, #ignore
.text
_ALIGN_TEXT
1: rd %pc, %l0
ldx [%l0 + (4f-1b)], %l1
add %l0, (6f-1b), %l2
clr %l3
2: cmp %l3, %l1
be %xcc, 3f
nop
ldx [%l2 + TTE_VPN], %l4
ldx [%l2 + TTE_DATA], %l5
sllx %l4, PAGE_SHIFT, %l4
wr %g0, ASI_DMMU, %asi
stxa %l4, [%g0 + AA_DMMU_TAR] %asi
stxa %l5, [%g0] ASI_DTLB_DATA_IN_REG
wr %g0, ASI_IMMU, %asi
stxa %l4, [%g0 + AA_IMMU_TAR] %asi
stxa %l5, [%g0] ASI_ITLB_DATA_IN_REG
membar #Sync
flush %l4
add %l2, 1 << TTE_SHIFT, %l2
add %l3, 1, %l3
ba %xcc, 2b
nop
3: ldx [%l0 + (5f-1b)], %l1
jmpl %l1, %g0
nop
_ALIGN_DATA
4: .xword 0x0
5: .xword 0x0
6:
DATA(mp_tramp_code)
.xword 1b
DATA(mp_tramp_code_len)
.xword 6b-1b
DATA(mp_tramp_tlb_slots)
.xword 4b-1b
DATA(mp_tramp_func)
.xword 5b-1b
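The code between labels 1 and 6 is position independent, and the four DATA symbols describe it to C code: where the template starts, how long it is, and the offsets of the TLB-slot-count word (4:) and the entry-point word (5:). A sketch of how the boot processor might instantiate it, copying the template and patching both words through the recorded offsets (mp_tramp_patch() and its arguments are illustrative; the authoritative consumer is the MP startup code in mp_machdep.c):

#include <string.h>

extern unsigned long mp_tramp_code;      /* address of label 1 */
extern unsigned long mp_tramp_code_len;  /* 6 - 1 */
extern unsigned long mp_tramp_tlb_slots; /* offset of 4 within the code */
extern unsigned long mp_tramp_func;      /* offset of 5 within the code */

static void
mp_tramp_patch(char *dst, unsigned long nslots, unsigned long entry)
{
        memcpy(dst, (const void *)mp_tramp_code, mp_tramp_code_len);
        *(unsigned long *)(dst + mp_tramp_tlb_slots) = nslots;
        *(unsigned long *)(dst + mp_tramp_func) = entry;
}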
/*
* void mp_startup(void)
*/
ENTRY(mp_startup)
wrpr %g0, PSTATE_NORMAL, %pstate
wrpr %g0, 0, %cleanwin
wrpr %g0, 0, %pil
wr %g0, 0, %fprs
SET(cpu_start_args, %l1, %l0)
mov CPU_CLKSYNC, %l1
membar #StoreLoad
stw %l1, [%l0 + CSA_STATE]
1: ldx [%l0 + CSA_TICK], %l1
brz %l1, 1b
nop
wrpr %l1, 0, %tick
UPA_GET_MID(%o0)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "mp_start: cpu %d entered kernel"
, %g1, %g2, %g3, 7, 8, 9)
stx %o0, [%g1 + KTR_PARM1]
9:
#endif
rdpr %ver, %l1
stx %l1, [%l0 + CSA_VER]
/*
* Inform the boot processor that we have initialized.
*/
mov CPU_INIT, %l1
membar #LoadStore
stw %l1, [%l0 + CSA_STATE]
/*
* Wait until it's our turn to bootstrap.
*/
2: lduw [%l0 + CSA_MID], %l1
cmp %l1, %o0
bne %xcc, 2b
nop
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "_mp_start: cpu %d got start signal"
, %g1, %g2, %g3, 7, 8, 9)
stx %o0, [%g1 + KTR_PARM1]
9:
#endif
add %l0, CSA_TTES, %l1
clr %l2
/*
* Map the per-cpu pages.
*/
3: sllx %l2, TTE_SHIFT, %l3
add %l1, %l3, %l3
ldx [%l3 + TTE_VPN], %l4
ldx [%l3 + TTE_DATA], %l5
wr %g0, ASI_DMMU, %asi
sllx %l4, PAGE_SHIFT, %l4
stxa %l4, [%g0 + AA_DMMU_TAR] %asi
stxa %l5, [%g0] ASI_DTLB_DATA_IN_REG
membar #Sync
add %l2, 1, %l2
cmp %l2, PCPU_PAGES
bne %xcc, 3b
nop
/*
* Get onto our per-cpu panic stack, which precedes the struct pcpu
* in the per-cpu page.
*/
ldx [%l0 + CSA_PCPU], %l1
set PCPU_PAGES * PAGE_SIZE - PC_SIZEOF, %l2
add %l1, %l2, %l1
sub %l1, SPOFF + CCFSZ, %sp
/*
* Enable interrupts.
*/
wrpr %g0, PSTATE_KERNEL, %pstate
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP,
"_mp_start: bootstrap cpuid=%d mid=%d pcpu=%#lx data=%#lx sp=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
lduw [%l1 + PC_CPUID], %g2
stx %g2, [%g1 + KTR_PARM1]
lduw [%l1 + PC_MID], %g2
stx %g2, [%g1 + KTR_PARM2]
stx %l1, [%g1 + KTR_PARM3]
stx %sp, [%g1 + KTR_PARM5]
9:
#endif
/*
* And away we go. This doesn't return.
*/
call cpu_mp_bootstrap
mov %l1, %o0
sir
! NOTREACHED
END(mp_startup)


@@ -1,647 +0,0 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asi.h>
#include <machine/asmacros.h>
#include <machine/ktr.h>
#include <machine/pstate.h>
#include "assym.s"
.register %g2, #ignore
.register %g3, #ignore
.register %g6, #ignore
#define E /* empty */
/*
* Generate load and store instructions for the corresponding width and asi
* (or not). Note that we want to evaluate the macro args before
* concatenating, so that E really turns into nothing.
*/
#define _LD(w, a) ld ## w ## a
#define _ST(w, a) st ## w ## a
#define LD(w, a) _LD(w, a)
#define ST(w, a) _ST(w, a)
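The two-level expansion is what makes E work: ## suppresses macro expansion of its operands, so the outer LD/ST macros exist only to expand their arguments before the inner _LD/_ST paste tokens together. With the definitions above, cpp yields:

/*  LD(ub, a)  -> _LD(ub, a) -> lduba   (ASI-qualified form)            */
/*  LD(ub, E)  -> _LD(ub, )  -> ldub    (E expanded to nothing first)   */
/*  _LD(ub, E)               -> ldubE   (why the indirection is needed) */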
/*
* Common code for copy routines.
*
* We use large macros to generate functions for each of the copy routines.
* This allows the load and store instructions to be generated for the right
* operation, asi or not. It is possible to write an asi independent function
* but this would require 2 expensive wrs in the main loop to switch %asi.
* It would also screw up profiling (if we ever get it), but may save some I$.
* We assume that either one of dasi and sasi is empty, or that they are both
* the same (empty or non-empty). It is up to the caller to set %asi.
*/
/*
* ASI independent implementation of copystr(9).
* Used to implement copyinstr() and copystr().
*
* Return value is in %g1.
*/
#define _COPYSTR(src, dst, len, done, sa, sasi, da, dasi) \
brz len, 4f ; \
mov src, %g2 ; \
1: deccc 1, len ; \
bl,a,pn %xcc, 2f ; \
nop ; \
LD(ub, sa) [src] sasi, %g1 ; \
ST(b, da) %g1, [dst] dasi ; \
brz,pn %g1, 3f ; \
inc src ; \
b %xcc, 1b ; \
inc dst ; \
2: mov ENAMETOOLONG, %g1 ; \
3: sub src, %g2, %g2 ; \
brnz,a done, 4f ; \
stx %g2, [done] ; \
4:
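In C terms, _COPYSTR has the copystr(9) contract: copy at most len bytes including the terminating NUL, return ENAMETOOLONG when the buffer is exhausted first, and report the number of bytes copied through done when it is non-NULL. A plain-memory sketch (copystr_sketch() is an illustrative name; the real entry points are generated from the macro below):

#include <errno.h>
#include <stddef.h>

static int
copystr_sketch(const char *src, char *dst, size_t len, size_t *done)
{
        const char *start = src;
        int error = ENAMETOOLONG;

        while (len-- > 0) {
                if ((*dst++ = *src++) == '\0') {
                        error = 0;      /* NUL copied, string fit */
                        break;
                }
        }
        if (done != NULL)
                *done = (size_t)(src - start);  /* count includes the NUL */
        return (error);
}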
/*
* ASI independent implementation of memset(3).
* Used to implement bzero(), memset() and aszero().
*
* If the pattern is non-zero, duplicate it to fill 64 bits.
* Store bytes until dst is 8-byte aligned, then store 8 bytes.
* It has yet to be determined how much unrolling is beneficial.
* Could also read and compare before writing to minimize snoop traffic.
*
* XXX bzero() should be implemented as
* #define bzero(dst, len) (void)memset((dst), 0, (len))
* if at all.
*/
#define _MEMSET(dst, pat, len, da, dasi) \
brlez,pn len, 5f ; \
and pat, 0xff, pat ; \
brz,pt pat, 1f ; \
sllx pat, 8, %g1 ; \
or pat, %g1, pat ; \
sllx pat, 16, %g1 ; \
or pat, %g1, pat ; \
sllx pat, 32, %g1 ; \
or pat, %g1, pat ; \
_ALIGN_TEXT ; \
1: deccc 1, len ; \
bl,pn %xcc, 5f ; \
btst 7, dst ; \
bz,a,pt %xcc, 2f ; \
inc 1, len ; \
ST(b, da) pat, [dst] dasi ; \
b %xcc, 1b ; \
inc dst ; \
_ALIGN_TEXT ; \
2: deccc 32, len ; \
bl,a,pn %xcc, 3f ; \
inc 32, len ; \
ST(x, da) pat, [dst] dasi ; \
ST(x, da) pat, [dst + 8] dasi ; \
ST(x, da) pat, [dst + 16] dasi ; \
ST(x, da) pat, [dst + 24] dasi ; \
b %xcc, 2b ; \
inc 32, dst ; \
_ALIGN_TEXT ; \
3: deccc 8, len ; \
bl,a,pn %xcc, 4f ; \
inc 8, len ; \
ST(x, da) pat, [dst] dasi ; \
b %xcc, 3b ; \
inc 8, dst ; \
_ALIGN_TEXT ; \
4: deccc 1, len ; \
bl,a,pn %xcc, 5f ; \
nop ; \
ST(b, da) pat, [dst] dasi ; \
b %xcc, 4b ; \
inc 1, dst ; \
5:
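The first four instructions of the non-zero path widen the pattern: the low byte is OR-shifted until it fills all 64 bits, so the 8-byte stores in loop 2 write the right value. The same step in C:

/* Replicate the low byte of "pat" across a 64-bit word:
 * 0x2a -> 0x2a2a2a2a2a2a2a2a. */
static unsigned long
widen_pattern(unsigned long pat)
{
        pat &= 0xff;
        pat |= pat << 8;
        pat |= pat << 16;
        pat |= pat << 32;
        return (pat);
}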
/*
* ASI independent implementation of memcpy(3).
* Used to implement bcopy(), copyin(), copyout(), memcpy(), ascopy(),
* ascopyfrom() and ascopyto().
*
* Transfer bytes until dst is 8-byte aligned. If src is then also 8 byte
* aligned, transfer 8 bytes, otherwise finish with bytes. The unaligned
* case could be optimized, but it is expected that this is the uncommon
* case and of questionable value. The code to do so is also rather large
* and ugly. It has yet to be determined how much unrolling is beneficial.
*
* XXX bcopy() must also check for overlap. This is stupid.
* XXX bcopy() should be implemented as
* #define bcopy(src, dst, len) (void)memcpy((dst), (src), (len))
* if at all.
*/
#define _MEMCPY(dst, src, len, da, dasi, sa, sasi) \
1: deccc 1, len ; \
bl,pn %xcc, 6f ; \
btst 7, dst ; \
bz,a,pt %xcc, 2f ; \
inc 1, len ; \
LD(ub, sa) [src] sasi, %g1 ; \
ST(b, da) %g1, [dst] dasi ; \
inc 1, src ; \
b %xcc, 1b ; \
inc 1, dst ; \
_ALIGN_TEXT ; \
2: btst 7, src ; \
bz,a,pt %xcc, 3f ; \
nop ; \
b,a %xcc, 5f ; \
_ALIGN_TEXT ; \
3: deccc 32, len ; \
bl,a,pn %xcc, 4f ; \
inc 32, len ; \
LD(x, sa) [src] sasi, %g1 ; \
LD(x, sa) [src + 8] sasi, %g2 ; \
LD(x, sa) [src + 16] sasi, %g3 ; \
LD(x, sa) [src + 24] sasi, %g4 ; \
ST(x, da) %g1, [dst] dasi ; \
ST(x, da) %g2, [dst + 8] dasi ; \
ST(x, da) %g3, [dst + 16] dasi ; \
ST(x, da) %g4, [dst + 24] dasi ; \
inc 32, src ; \
b %xcc, 3b ; \
inc 32, dst ; \
_ALIGN_TEXT ; \
4: deccc 8, len ; \
bl,a,pn %xcc, 5f ; \
inc 8, len ; \
LD(x, sa) [src] sasi, %g1 ; \
ST(x, da) %g1, [dst] dasi ; \
inc 8, src ; \
b %xcc, 4b ; \
inc 8, dst ; \
_ALIGN_TEXT ; \
5: deccc 1, len ; \
bl,a,pn %xcc, 6f ; \
nop ; \
LD(ub, sa) [src] sasi, %g1 ; \
ST(b, da) %g1, [dst] dasi ; \
inc src ; \
b %xcc, 5b ; \
inc dst ; \
6:
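The same control flow in C makes the alignment policy explicit: bytes until dst is 8-byte aligned, doubleword moves only if src then happens to be aligned as well, and bytes for whatever remains. A sketch (memcpy_sketch() is illustrative and drops the 32-byte unrolling):

#include <stddef.h>

static void
memcpy_sketch(char *dst, const char *src, size_t len)
{
        /* 1: bytes until dst is 8-byte aligned. */
        while (len > 0 && ((unsigned long)dst & 7) != 0) {
                *dst++ = *src++;
                len--;
        }
        /* 3/4: doubleword moves, but only if src is now aligned too. */
        if (((unsigned long)src & 7) == 0) {
                for (; len >= 8; len -= 8, src += 8, dst += 8)
                        *(unsigned long *)dst = *(const unsigned long *)src;
        }
        /* 5: trailing (or unaligned-src) bytes. */
        while (len-- > 0)
                *dst++ = *src++;
}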
#define CATCH_SETUP(label) \
SET(label, %g2, %g1) ; \
stx %g1, [PCB_REG + PCB_ONFAULT]
#define CATCH_END() \
stx %g0, [PCB_REG + PCB_ONFAULT]
#define FU_ALIGNED(loader, label) \
CATCH_SETUP(label) ; \
loader [%o0] ASI_AIUP, %o0 ; \
retl ; \
CATCH_END()
#define FU_BYTES(loader, size, label) \
btst (size) - 1, %o0 ; \
bnz,pn %xcc, .Lfsalign ; \
EMPTY ; \
FU_ALIGNED(loader, label)
#define SU_ALIGNED(storer, label) \
CATCH_SETUP(label) ; \
storer %o1, [%o0] ASI_AIUP ; \
CATCH_END() ; \
retl ; \
clr %o0
#define SU_BYTES(storer, size, label) \
btst (size) - 1, %o0 ; \
bnz,pn %xcc, .Lfsalign ; \
EMPTY ; \
SU_ALIGNED(storer, label)
/*
* int bcmp(const void *b1, const void *b2, size_t len)
*/
ENTRY(bcmp)
brz,pn %o2, 2f
clr %o3
1: ldub [%o0 + %o3], %o4
ldub [%o1 + %o3], %o5
cmp %o4, %o5
bne,pn %xcc, 2f
inc %o3
deccc %o2
bne,pt %xcc, 1b
nop
2: retl
mov %o2, %o0
END(bcmp)
/*
* void bcopy(const void *src, void *dst, size_t len)
*/
ENTRY(bcopy)
ENTRY(ovbcopy)
/*
* Check for overlap, and copy backwards if so.
*/
sub %o1, %o0, %g1
cmp %g1, %o2
bgeu,a,pt %xcc, 3f
nop
/*
* Copy backwards.
*/
add %o0, %o2, %o0
add %o1, %o2, %o1
1: deccc 1, %o2
bl,a,pn %xcc, 2f
nop
dec 1, %o0
ldub [%o0], %g1
dec 1, %o1
b %xcc, 1b
stb %g1, [%o1]
2: retl
nop
/*
* Do the fast version.
*/
3: _MEMCPY(%o1, %o0, %o2, E, E, E, E)
retl
nop
END(bcopy)
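The overlap test at the top of bcopy() is a single unsigned comparison: %g1 = dst - src is compared against len, and the forward path is taken when dst - src >= len. If dst sits below src the subtraction wraps to a huge unsigned value, so both safe cases pass one test; only 0 < dst - src < len, where a forward copy would overwrite source bytes it has not yet read, falls through to the backward loop. The predicate in C:

#include <stddef.h>
#include <stdint.h>

static int
forward_copy_is_safe(const void *src, void *dst, size_t len)
{
        /* Wraps around for dst < src, which is exactly the point. */
        return ((uintptr_t)dst - (uintptr_t)src >= (uintptr_t)len);
}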
/*
* void bzero(void *b, size_t len)
*/
ENTRY(bzero)
_MEMSET(%o0, %g0, %o1, E, E)
retl
nop
END(bzero)
/*
* void ascopy(u_long asi, vm_offset_t src, vm_offset_t dst, size_t len)
*/
ENTRY(ascopy)
wr %o0, 0, %asi
_MEMCPY(%o2, %o1, %o3, a, %asi, a, %asi)
retl
nop
END(ascopy)
/*
* void ascopyfrom(u_long sasi, vm_offset_t src, caddr_t dst, size_t len)
*/
ENTRY(ascopyfrom)
wr %o0, 0, %asi
_MEMCPY(%o2, %o1, %o3, E, E, a, %asi)
retl
nop
END(ascopyfrom)
/*
* void ascopyto(caddr_t src, u_long dasi, vm_offset_t dst, size_t len)
*/
ENTRY(ascopyto)
wr %o1, 0, %asi
_MEMCPY(%o2, %o0, %o3, a, %asi, E, E)
retl
nop
END(ascopyto)
/*
* void aszero(u_long asi, vm_offset_t pa, size_t len)
*/
ENTRY(aszero)
wr %o0, 0, %asi
_MEMSET(%o1, %g0, %o2, a, %asi)
retl
nop
END(aszero)
/*
* void *memcpy(void *dst, const void *src, size_t len)
*/
ENTRY(memcpy)
mov %o0, %o3
_MEMCPY(%o3, %o1, %o2, E, E, E, E)
retl
nop
END(memcpy)
/*
* void *memset(void *b, int c, size_t len)
*/
ENTRY(memset)
mov %o0, %o3
_MEMSET(%o3, %o1, %o2, E, E)
retl
nop
END(memset)
/*
* int copyin(const void *uaddr, void *kaddr, size_t len)
*/
ENTRY(copyin)
CATCH_SETUP(.Lefault)
wr %g0, ASI_AIUP, %asi
_MEMCPY(%o1, %o0, %o2, E, E, a, %asi)
CATCH_END()
retl
clr %o0
END(copyin)
/*
* int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
*/
ENTRY(copyinstr)
CATCH_SETUP(.Lefault)
wr %g0, ASI_AIUP, %asi
_COPYSTR(%o0, %o1, %o2, %o3, a, %asi, E, E)
CATCH_END()
retl
mov %g1, %o0
END(copyinstr)
/*
* int copyout(const void *kaddr, void *uaddr, size_t len)
*/
ENTRY(copyout)
CATCH_SETUP(.Lefault)
wr %g0, ASI_AIUP, %asi
_MEMCPY(%o1, %o0, %o2, a, %asi, E, E)
CATCH_END()
retl
clr %o0
END(copyout)
.Lefault:
CATCH_END()
retl
mov EFAULT, %o0
/*
* int copystr(const void *src, void *dst, size_t len, size_t *done)
*/
ENTRY(copystr)
_COPYSTR(%o0, %o1, %o2, %o3, E, E, E, E)
retl
mov %g1, %o0
END(copystr)
/*
* int fubyte(const void *base)
*/
ENTRY(fubyte)
FU_ALIGNED(lduba, .Lfsfault)
END(fubyte)
/*
* long fuword(const void *base)
*/
ENTRY(fuword)
FU_BYTES(ldxa, 8, .Lfsfault)
END(fuword)
/*
* int fuswintr(const void *base)
*/
ENTRY(fuswintr)
FU_BYTES(lduha, 2, fsbail)
END(fuswintr)
/*
* int16_t fuword16(const void *base)
*/
ENTRY(fuword16)
FU_BYTES(lduha, 2, .Lfsfault)
END(fuword16)
/*
* int32_t fuword32(const void *base)
*/
ENTRY(fuword32)
FU_BYTES(lduwa, 4, .Lfsfault)
END(fuword32)
/*
* int64_t fuword64(const void *base)
*/
ENTRY(fuword64)
FU_BYTES(ldxa, 8, .Lfsfault)
END(fuword64)
/*
* int subyte(const void *base, int byte)
*/
ENTRY(subyte)
SU_ALIGNED(stba, .Lfsfault)
END(subyte)
/*
* int suword(const void *base, long word)
*/
ENTRY(suword)
SU_BYTES(stxa, 8, .Lfsfault)
END(suword)
/*
* int suswintr(const void *base, int word)
*/
ENTRY(suswintr)
SU_BYTES(stwa, 2, fsbail)
END(suswintr)
/*
* int suword16(const void *base, int16_t word)
*/
ENTRY(suword16)
SU_BYTES(stha, 2, .Lfsfault)
END(suword16)
/*
* int suword32(const void *base, int32_t word)
*/
ENTRY(suword32)
SU_BYTES(stwa, 4, .Lfsfault)
END(suword32)
/*
* int suword64(const void *base, int64_t word)
*/
ENTRY(suword64)
SU_BYTES(stxa, 8, .Lfsfault)
END(suword64)
_ALIGN_TEXT
.Lfsalign:
retl
mov -1, %o0
_ALIGN_TEXT
.Lfsfault:
CATCH_END()
retl
mov -1, %o0
ENTRY(fsbail)
CATCH_END()
retl
mov -1, %o0
END(fsbail)
ENTRY(longjmp)
set 1, %g3
movrz %o1, %o1, %g3
mov %o0, %g1
ldx [%g1 + _JB_FP], %g2
1: cmp %fp, %g2
bl,a,pt %xcc, 1b
restore
bne,pn %xcc, 2f
ldx [%g1 + _JB_SP], %o2
cmp %o2, %sp
blt,pn %xcc, 2f
movge %xcc, %o2, %sp
ldx [%g1 + _JB_PC], %o7
retl
mov %g3, %o0
2: PANIC("longjmp botch", %l1)
END(longjmp)
ENTRY(setjmp)
stx %sp, [%o0 + _JB_SP]
stx %o7, [%o0 + _JB_PC]
stx %fp, [%o0 + _JB_FP]
retl
clr %o0
END(setjmp)
/*
* void openfirmware(cell_t args[])
*/
ENTRY(openfirmware)
save %sp, -CCFSZ, %sp
SET(ofw_vec, %l7, %l6)
ldx [%l6], %l6
rdpr %pil, %l7
wrpr %g0, PIL_TICK, %pil
call %l6
mov %i0, %o0
wrpr %l7, 0, %pil
ret
restore %o0, %g0, %o0
END(openfirmware)
/*
* void openfirmware_exit(cell_t args[])
*/
ENTRY(openfirmware_exit)
save %sp, -CCFSZ, %sp
flushw
wrpr %g0, PIL_TICK, %pil
SET(ofw_tba, %l7, %l5)
ldx [%l5], %l5
wrpr %l5, 0, %tba ! restore the ofw trap table
SET(ofw_vec, %l7, %l6)
ldx [%l6], %l6
SET(kstack0 + KSTACK_PAGES * PAGE_SIZE - PCB_SIZEOF, %l7, %l0)
sub %l0, SPOFF, %fp ! setup a stack in a locked page
sub %l0, SPOFF + CCFSZ, %sp
mov AA_DMMU_PCXR, %l3 ! set context 0
stxa %g0, [%l3] ASI_DMMU
membar #Sync
wrpr %g0, 0, %tl ! force trap level 0
call %l6
mov %i0, %o0
! never to return
END(openfirmware_exit)
#ifdef GPROF
ENTRY(user)
nop
ENTRY(btrap)
nop
ENTRY(etrap)
nop
ENTRY(bintr)
nop
ENTRY(eintr)
nop
/*
* XXX including sys/gmon.h in genassym.c is not possible due to uintfptr_t
* badness.
*/
#define GM_STATE 0x0
#define GMON_PROF_OFF 3
#define GMON_PROF_HIRES 4
.globl _mcount
.set _mcount, __cyg_profile_func_enter
ENTRY(__cyg_profile_func_enter)
SET(_gmonparam, %o3, %o2)
lduw [%o2 + GM_STATE], %o3
cmp %o3, GMON_PROF_OFF
be,a,pn %icc, 1f
nop
SET(mcount, %o3, %o2)
jmpl %o2, %g0
nop
1: retl
nop
END(__cyg_profile_func_enter)
#ifdef GUPROF
ENTRY(__cyg_profile_func_exit)
SET(_gmonparam, %o3, %o2)
lduw [%o2 + GM_STATE], %o3
cmp %o3, GMON_PROF_HIRES
be,a,pn %icc, 1f
nop
SET(mexitcount, %o3, %o2)
jmpl %o2, %g0
nop
1: retl
nop
END(__cyg_profile_func_exit)
#endif /* GUPROF */
#endif /* GPROF */


@@ -1,307 +0,0 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asmacros.h>
#include <machine/asi.h>
#include <machine/ktr.h>
#include <machine/tstate.h>
.register %g2, #ignore
.register %g3, #ignore
#include "assym.s"
ENTRY(cpu_throw)
save %sp, -CCFSZ, %sp
call choosethread
ldx [PCPU(CURTHREAD)], %l0
flushw
b,a %xcc, .Lsw1
nop
END(cpu_throw)
ENTRY(cpu_switch)
/*
* Choose a new thread. If it's the same as the current one, do
* nothing.
*/
save %sp, -CCFSZ, %sp
call choosethread
ldx [PCPU(CURTHREAD)], %l0
cmp %l0, %o0
be,a,pn %xcc, 4f
nop
ldx [%l0 + TD_PCB], %l1
/*
* If the current thread was using floating point, save its context.
*/
ldx [%l0 + TD_FRAME], %l2
ldub [%l2 + TF_FPRS], %l3
andcc %l3, FPRS_FEF, %g0
bz,a,pt %xcc, 1f
nop
wr %g0, FPRS_FEF, %fprs
wr %g0, ASI_BLK_S, %asi
stda %f0, [%l1 + PCB_FPSTATE + FP_FB0] %asi
stda %f16, [%l1 + PCB_FPSTATE + FP_FB1] %asi
stda %f32, [%l1 + PCB_FPSTATE + FP_FB2] %asi
stda %f48, [%l1 + PCB_FPSTATE + FP_FB3] %asi
membar #Sync
wr %g0, 0, %fprs
andn %l3, FPRS_FEF, %l3
stb %l3, [%l2 + TF_FPRS]
/*
* Flush the windows out to the stack and save the current frame
* pointer and program counter.
*/
1: flushw
wrpr %g0, 0, %cleanwin
stx %fp, [%l1 + PCB_FP]
stx %i7, [%l1 + PCB_PC]
/*
* Load the new thread's frame pointer and program counter, and set
* the current thread and pcb.
*/
.Lsw1:
#if KTR_COMPILE & KTR_PROC
CATR(KTR_PROC, "cpu_switch: new td=%p pc=%#lx fp=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
stx %o0, [%g1 + KTR_PARM1]
ldx [%o0 + TD_PCB], %g2
ldx [%g2 + PCB_PC], %g3
stx %g3, [%g1 + KTR_PARM2]
ldx [%g2 + PCB_FP], %g3
stx %g3, [%g1 + KTR_PARM3]
9:
#endif
ldx [%o0 + TD_PCB], %o1
ldx [%o1 + PCB_FP], %fp
ldx [%o1 + PCB_PC], %i7
sub %fp, CCFSZ, %sp
stx %o0, [PCPU(CURTHREAD)]
stx %o1, [PCPU(CURPCB)]
SET(sched_lock, %o3, %o2)
stx %o0, [%o2 + MTX_LOCK]
wrpr %g0, PSTATE_NORMAL, %pstate
mov %o1, PCB_REG
wrpr %g0, PSTATE_ALT, %pstate
mov %o1, PCB_REG
wrpr %g0, PSTATE_KERNEL, %pstate
/*
* Point to the vmspaces of the new process, and of the last non-kernel
* process to run.
*/
ldx [%o0 + TD_PROC], %o2
ldx [PCPU(VMSPACE)], %l2
ldx [%o2 + P_VMSPACE], %o2
#if KTR_COMPILE & KTR_PROC
CATR(KTR_PROC, "cpu_switch: new vm=%p old vm=%p"
, %g1, %g2, %g3, 7, 8, 9)
stx %o2, [%g1 + KTR_PARM1]
stx %l2, [%g1 + KTR_PARM2]
9:
#endif
/*
* If they are the same we are done.
*/
cmp %l2, %o2
be,a,pn %xcc, 4f
nop
/*
* If the new process has nucleus context we are done.
*/
lduw [PCPU(CPUID)], %o3
sllx %o3, INT_SHIFT, %o3
add %o2, VM_PMAP + PM_CONTEXT, %o4
lduw [%o3 + %o4], %o5
#if KTR_COMPILE & KTR_PROC
CATR(KTR_PROC, "cpu_switch: ctx=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
stx %o5, [%g1 + KTR_PARM1]
9:
#endif
brz,a,pn %o5, 4f
nop
/*
* If there was no non-kernel vmspace, don't try to deactivate it.
*/
brz,a,pn %l2, 2f
nop
/*
* Mark the pmap of the last non-kernel vmspace to run as no longer
* active on this cpu.
*/
lduw [%l2 + VM_PMAP + PM_ACTIVE], %l3
lduw [PCPU(CPUMASK)], %l4
andn %l3, %l4, %l3
stw %l3, [%l2 + VM_PMAP + PM_ACTIVE]
/*
* Take away its context.
*/
lduw [PCPU(CPUID)], %l3
sllx %l3, INT_SHIFT, %l3
add %l2, VM_PMAP + PM_CONTEXT, %l4
mov -1, %l5
stw %l5, [%l3 + %l4]
/*
* Find the current free tlb context for this cpu and install it as
* the new primary context.
*/
2: lduw [PCPU(TLB_CTX)], %o5
stw %o5, [%o3 + %o4]
mov AA_DMMU_PCXR, %o4
stxa %o5, [%o4] ASI_DMMU
membar #Sync
/*
* See if we have run out of free contexts.
*/
lduw [PCPU(TLB_CTX_MAX)], %o3
#if KTR_COMPILE & KTR_PROC
CATR(KTR_PROC, "cpu_switch: ctx=%#lx next=%#lx max=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
stx %o5, [%g1 + KTR_PARM1]
add %o5, 1, %g2
stx %g2, [%g1 + KTR_PARM2]
stx %o3, [%g1 + KTR_PARM3]
9:
#endif
add %o5, 1, %o5
cmp %o3, %o5
bne,a,pt %xcc, 3f
stw %o5, [PCPU(TLB_CTX)]
#if KTR_COMPILE & KTR_PROC
CATR(KTR_PROC, "cpu_switch: context rollover"
, %g1, %g2, %g3, 7, 8, 9)
9:
#endif
/*
* We will start re-using contexts on the next switch. Flush all
* non-nucleus mappings from the tlb, and reset the next free context.
*/
call pmap_context_rollover
nop
ldx [PCPU(CURTHREAD)], %o0
ldx [%o0 + TD_PROC], %o2
ldx [%o2 + P_VMSPACE], %o2
/*
* Mark the pmap as active on this cpu.
*/
3: lduw [%o2 + VM_PMAP + PM_ACTIVE], %o3
lduw [PCPU(CPUMASK)], %o4
or %o3, %o4, %o3
stw %o3, [%o2 + VM_PMAP + PM_ACTIVE]
/*
* Make note of the change in vmspace.
*/
stx %o2, [PCPU(VMSPACE)]
/*
* Load the address of the tsb, switch to mmu globals, and install
* the preloaded tsb pointer.
*/
ldx [%o2 + VM_PMAP + PM_TSB], %o3
wrpr %g0, PSTATE_MMU, %pstate
mov %o3, TSB_REG
wrpr %g0, PSTATE_KERNEL, %pstate
4:
#if KTR_COMPILE & KTR_PROC
CATR(KTR_PROC, "cpu_switch: return"
, %g1, %g2, %g3, 7, 8, 9)
9:
#endif
/*
* Done. Return and load the new process's window from the stack.
*/
ret
restore
END(cpu_switch)
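The context handling in the middle of cpu_switch is easier to see in C. Each CPU hands out primary context numbers from a private counter; when the counter would reach the per-cpu maximum, pmap_context_rollover() flushes all non-nucleus TLB entries and resets it. A sketch with the PCPU fields rendered as a struct (tlb_ctx_alloc() and struct cpu_ctx are illustrative; pmap_context_rollover() is the routine actually called above):

extern void pmap_context_rollover(void);

struct cpu_ctx {
        unsigned int tlb_ctx;      /* PCPU(TLB_CTX): next free context */
        unsigned int tlb_ctx_max;  /* PCPU(TLB_CTX_MAX): end of range */
};

static unsigned int
tlb_ctx_alloc(struct cpu_ctx *pc)
{
        unsigned int ctx;

        ctx = pc->tlb_ctx;              /* hand out the next free context */
        if (ctx + 1 != pc->tlb_ctx_max)
                pc->tlb_ctx = ctx + 1;
        else
                pmap_context_rollover(); /* flush user mappings, reset */
        return (ctx);
}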
ENTRY(savectx)
save %sp, -CCFSZ, %sp
flushw
call savefpctx
mov %i0, %o0
stx %fp, [%i0 + PCB_FP]
stx %i7, [%i0 + PCB_PC]
ret
restore %g0, 0, %o0
END(savectx)
/*
* void savefpctx(struct pcb *pcb);
*/
ENTRY(savefpctx)
wr %g0, FPRS_FEF, %fprs
wr %g0, ASI_BLK_S, %asi
stda %f0, [%o0 + PCB_FPSTATE + FP_FB0] %asi
stda %f16, [%o0 + PCB_FPSTATE + FP_FB1] %asi
stda %f32, [%o0 + PCB_FPSTATE + FP_FB2] %asi
stda %f48, [%o0 + PCB_FPSTATE + FP_FB3] %asi
membar #Sync
retl
wr %g0, 0, %fprs
END(savefpctx)
/*
* void restorefpctx(struct pcb *pcb);
*/
ENTRY(restorefpctx)
wr %g0, FPRS_FEF, %fprs
wr %g0, ASI_BLK_S, %asi
ldda [%o0 + PCB_FPSTATE + FP_FB0] %asi, %f0
ldda [%o0 + PCB_FPSTATE + FP_FB1] %asi, %f16
ldda [%o0 + PCB_FPSTATE + FP_FB2] %asi, %f32
ldda [%o0 + PCB_FPSTATE + FP_FB3] %asi, %f48
membar #Sync
retl
wr %g0, 0, %fprs
END(restorefpctx)