Merging projects/armv6, part 1

Cumulative patch of changes that are not vendor-specific:
	- ARMv6 and ARMv7 architecture support
	- ARM SMP support
	- VFP/Neon support
	- ARM Generic Interrupt Controller driver
	- Simplification of startup code for all platforms
Oleksandr Tymoshenko 2012-08-15 03:03:03 +00:00
parent 8340ece577
commit cf1a573f04
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=239268
64 changed files with 9949 additions and 701 deletions

View File

@ -54,14 +54,19 @@ __FBSDID("$FreeBSD$");
.text
.align 0
#ifdef MULTIPROCESSOR
.Lcpu_info:
.word _C_LABEL(cpu_info)
#ifdef _ARM_ARCH_6
#define GET_PCB(tmp) \
mrc p15, 0, tmp, c13, c0, 4; \
add tmp, tmp, #(PC_CURPCB)
#else
.Lcurpcb:
.word _C_LABEL(__pcpu) + PC_CURPCB
#define GET_PCB(tmp) \
ldr tmp, .Lcurpcb
#endif
#define SAVE_REGS stmfd sp!, {r4-r11}
#define RESTORE_REGS ldmfd sp!, {r4-r11}
@ -111,18 +116,9 @@ ENTRY(copyin)
.Lnormal:
SAVE_REGS
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0-r2, r14}
bl _C_LABEL(cpu_number)
ldr r4, .Lcpu_info
ldr r4, [r4, r0, lsl #2]
ldr r4, [r4, #CI_CURPCB]
ldmfd sp!, {r0-r2, r14}
#else
ldr r4, .Lcurpcb
GET_PCB(r4)
ldr r4, [r4]
#endif
ldr r5, [r4, #PCB_ONFAULT]
adr r3, .Lcopyfault
@ -357,18 +353,8 @@ ENTRY(copyout)
.Lnormale:
SAVE_REGS
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0-r2, r14}
bl _C_LABEL(cpu_number)
ldr r4, .Lcpu_info
ldr r4, [r4, r0, lsl #2]
ldr r4, [r4, #CI_CURPCB]
ldmfd sp!, {r0-r2, r14}
#else
ldr r4, .Lcurpcb
GET_PCB(r4)
ldr r4, [r4]
#endif
ldr r5, [r4, #PCB_ONFAULT]
adr r3, .Lcopyfault
@ -561,18 +547,9 @@ ENTRY(copyout)
* else EFAULT if a page fault occurred.
*/
ENTRY(badaddr_read_1)
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0-r1, r14}
bl _C_LABEL(cpu_number)
ldr r2, .Lcpu_info
ldr r2, [r2, r0, lsl #2]
ldr r2, [r2, #CI_CURPCB]
ldmfd sp!, {r0-r1, r14}
#else
ldr r2, .Lcurpcb
GET_PCB(r2)
ldr r2, [r2]
#endif
ldr ip, [r2, #PCB_ONFAULT]
adr r3, 1f
str r3, [r2, #PCB_ONFAULT]
@ -595,18 +572,9 @@ ENTRY(badaddr_read_1)
* else EFAULT if a page fault occurred.
*/
ENTRY(badaddr_read_2)
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0-r1, r14}
bl _C_LABEL(cpu_number)
ldr r2, .Lcpu_info
ldr r2, [r2, r0, lsl #2]
ldr r2, [r2, #CI_CURPCB]
ldmfd sp!, {r0-r1, r14}
#else
ldr r2, .Lcurpcb
GET_PCB(r2)
ldr r2, [r2]
#endif
ldr ip, [r2, #PCB_ONFAULT]
adr r3, 1f
str r3, [r2, #PCB_ONFAULT]
@ -629,18 +597,9 @@ ENTRY(badaddr_read_2)
* else EFAULT if a page fault occurred.
*/
ENTRY(badaddr_read_4)
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0-r1, r14}
bl _C_LABEL(cpu_number)
ldr r2, .Lcpu_info
ldr r2, [r2, r0, lsl #2]
ldr r2, [r2, #CI_CURPCB]
ldmfd sp!, {r0-r1, r14}
#else
ldr r2, .Lcurpcb
GET_PCB(r2)
ldr r2, [r2]
#endif
ldr ip, [r2, #PCB_ONFAULT]
adr r3, 1f
str r3, [r2, #PCB_ONFAULT]
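
The GET_PCB() macro added at the top of this file is the heart of the change: on ARMv6 and later the per-CPU data pointer lives in TPIDRPRW (CP15 c13/c0/4, the privileged-only software thread ID register), so the old MULTIPROCESSOR path that called cpu_number() and indexed a cpu_info array can be dropped entirely. A minimal C sketch of the same lookup the macro performs (helper name hypothetical; PC_CURPCB is the assym offset of pc_curpcb within struct pcpu):

static inline struct pcb *
curpcb_v6(void)
{
	char *pcpu;

	/* TPIDRPRW holds the struct pcpu pointer on ARMv6/v7. */
	__asm __volatile("mrc p15, 0, %0, c13, c0, 4" : "=r" (pcpu));
	/* GET_PCB() yields &pcpu->pc_curpcb; callers then load through it. */
	return (*(struct pcb **)(pcpu + PC_CURPCB));
}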

View File

@ -41,12 +41,15 @@ __FBSDID("$FreeBSD$");
.text
.align 0
#ifdef MULTIPROCESSOR
.Lcpu_info:
.word _C_LABEL(cpu_info)
#ifdef _ARM_ARCH_6
#define GET_PCB(tmp) \
mrc p15, 0, tmp, c13, c0, 4; \
add tmp, tmp, #(PC_CURPCB)
#else
.Lcurpcb:
.word _C_LABEL(__pcpu) + PC_CURPCB
#define GET_PCB(tmp) \
ldr tmp, .Lcurpcb
#endif
/*
@ -85,18 +88,8 @@ ENTRY(copyin)
.Lnormal:
stmfd sp!, {r10-r11, lr}
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0-r2}
bl _C_LABEL(cpu_number)
ldr r10, .Lcpu_info
ldmfd sp!, {r0-r2}
ldr r10, [r10, r0, lsl #2]
ldr r10, [r10, #CI_CURPCB]
#else
ldr r10, .Lcurpcb
GET_PCB(r10)
ldr r10, [r10]
#endif
mov r3, #0x00
adr ip, .Lcopyin_fault
@ -537,18 +530,8 @@ ENTRY(copyout)
.Lnormale:
stmfd sp!, {r10-r11, lr}
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0-r2}
bl _C_LABEL(cpu_number)
ldr r10, .Lcpu_info
ldmfd sp!, {r0-r2}
ldr r10, [r10, r0, lsl #2]
ldr r10, [r10, #CI_CURPCB]
#else
ldr r10, .Lcurpcb
GET_PCB(r10)
ldr r10, [r10]
#endif
mov r3, #0x00
adr ip, .Lcopyout_fault

View File

@ -51,11 +51,9 @@ ENTRY(generic_bs_r_1)
ldrb r0, [r1, r2]
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_r_2)
ldrh r0, [r1, r2]
RET
#endif
ENTRY(generic_bs_r_4)
ldr r0, [r1, r2]
@ -69,11 +67,9 @@ ENTRY(generic_bs_w_1)
strb r3, [r1, r2]
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_w_2)
strh r3, [r1, r2]
RET
#endif
ENTRY(generic_bs_w_4)
str r3, [r1, r2]
@ -97,7 +93,6 @@ ENTRY(generic_bs_rm_1)
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_rm_2)
add r0, r1, r2
mov r1, r3
@ -111,7 +106,6 @@ ENTRY(generic_armv4_bs_rm_2)
bne 1b
RET
#endif
ENTRY(generic_bs_rm_4)
add r0, r1, r2
@ -145,7 +139,6 @@ ENTRY(generic_bs_wm_1)
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_wm_2)
add r0, r1, r2
mov r1, r3
@ -159,7 +152,6 @@ ENTRY(generic_armv4_bs_wm_2)
bne 1b
RET
#endif
ENTRY(generic_bs_wm_4)
add r0, r1, r2
@ -193,7 +185,6 @@ ENTRY(generic_bs_rr_1)
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_rr_2)
add r0, r1, r2
mov r1, r3
@ -207,7 +198,6 @@ ENTRY(generic_armv4_bs_rr_2)
bne 1b
RET
#endif
ENTRY(generic_bs_rr_4)
add r0, r1, r2
@ -241,7 +231,6 @@ ENTRY(generic_bs_wr_1)
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_wr_2)
add r0, r1, r2
mov r1, r3
@ -255,7 +244,6 @@ ENTRY(generic_armv4_bs_wr_2)
bne 1b
RET
#endif
ENTRY(generic_bs_wr_4)
add r0, r1, r2
@ -288,7 +276,6 @@ ENTRY(generic_bs_sr_1)
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_sr_2)
add r0, r1, r2
mov r1, r3
@ -301,7 +288,6 @@ ENTRY(generic_armv4_bs_sr_2)
bne 1b
RET
#endif
ENTRY(generic_bs_sr_4)
add r0, r1, r2
@ -320,7 +306,6 @@ ENTRY(generic_bs_sr_4)
* copy region
*/
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_c_2)
add r0, r1, r2
ldr r2, [sp, #0]
@ -350,4 +335,3 @@ ENTRY(generic_armv4_bs_c_2)
bne 3b
RET
#endif

File diff suppressed because it is too large

View File

@ -49,12 +49,17 @@ __FBSDID("$FreeBSD$");
.text
.align 0
#ifdef MULTIPROCESSOR
.Lcpu_info:
.word _C_LABEL(cpu_info)
#ifdef _ARM_ARCH_6
#define GET_PCB(tmp) \
mrc p15, 0, tmp, c13, c0, 4; \
add tmp, tmp, #(PC_CURPCB)
#else
.Lpcb:
.word _C_LABEL(__pcpu) + PC_CURPCB
#define GET_PCB(tmp) \
ldr tmp, .Lpcb
#endif
/*
@ -108,18 +113,8 @@ ENTRY(copyinstr)
moveq r0, #ENAMETOOLONG
beq 2f
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0-r3, r14}
bl _C_LABEL(cpu_number)
ldr r4, .Lcpu_info
ldr r4, [r4, r0, lsl #2]
ldr r4, [r4, #CI_CURPCB]
ldmfd sp!, {r0-r3, r14}
#else
ldr r4, .Lpcb
GET_PCB(r4)
ldr r4, [r4]
#endif
#ifdef DIAGNOSTIC
teq r4, #0x00000000
@ -165,18 +160,8 @@ ENTRY(copyoutstr)
moveq r0, #ENAMETOOLONG
beq 2f
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0-r3, r14}
bl _C_LABEL(cpu_number)
ldr r4, .Lcpu_info
ldr r4, [r4, r0, lsl #2]
ldr r4, [r4, #CI_CURPCB]
ldmfd sp!, {r0-r3, r14}
#else
ldr r4, .Lpcb
GET_PCB(r4)
ldr r4, [r4]
#endif
#ifdef DIAGNOSTIC
teq r4, #0x00000000

View File

@ -98,6 +98,10 @@ int arm_pcache_unified;
int arm_dcache_align;
int arm_dcache_align_mask;
u_int arm_cache_level;
u_int arm_cache_type[14];
u_int arm_cache_loc;
/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
int ctrl;
@ -472,6 +476,126 @@ struct cpu_functions arm10_cpufuncs = {
};
#endif /* CPU_ARM10 */
#ifdef CPU_MV_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
/* CPU functions */
cpufunc_id, /* id */
arm11_drain_writebuf, /* cpwait */
/* MMU functions */
cpufunc_control, /* control */
cpufunc_domains, /* Domain */
pj4b_setttb, /* Setttb */
cpufunc_faultstatus, /* Faultstatus */
cpufunc_faultaddress, /* Faultaddress */
/* TLB functions */
armv7_tlb_flushID, /* tlb_flushID */
armv7_tlb_flushID_SE, /* tlb_flushID_SE */
armv7_tlb_flushID, /* tlb_flushI */
armv7_tlb_flushID_SE, /* tlb_flushI_SE */
armv7_tlb_flushID, /* tlb_flushD */
armv7_tlb_flushID_SE, /* tlb_flushD_SE */
/* Cache operations */
armv7_idcache_wbinv_all, /* icache_sync_all */
armv7_icache_sync_range, /* icache_sync_range */
armv7_dcache_wbinv_all, /* dcache_wbinv_all */
armv7_dcache_wbinv_range, /* dcache_wbinv_range */
armv7_dcache_inv_range, /* dcache_inv_range */
armv7_dcache_wb_range, /* dcache_wb_range */
armv7_idcache_wbinv_all, /* idcache_wbinv_all */
armv7_idcache_wbinv_range, /* idcache_wbinv_range */
(void *)cpufunc_nullop, /* l2cache_wbinv_all */
(void *)cpufunc_nullop, /* l2cache_wbinv_range */
(void *)cpufunc_nullop, /* l2cache_inv_range */
(void *)cpufunc_nullop, /* l2cache_wb_range */
/* Other functions */
pj4b_drain_readbuf, /* flush_prefetchbuf */
arm11_drain_writebuf, /* drain_writebuf */
pj4b_flush_brnchtgt_all, /* flush_brnchtgt_C */
pj4b_flush_brnchtgt_va, /* flush_brnchtgt_E */
(void *)cpufunc_nullop, /* sleep */
/* Soft functions */
cpufunc_null_fixup, /* dataabt_fixup */
cpufunc_null_fixup, /* prefetchabt_fixup */
arm11_context_switch, /* context_switch */
pj4bv7_setup /* cpu setup */
};
struct cpu_functions pj4bv6_cpufuncs = {
/* CPU functions */
cpufunc_id, /* id */
arm11_drain_writebuf, /* cpwait */
/* MMU functions */
cpufunc_control, /* control */
cpufunc_domains, /* Domain */
pj4b_setttb, /* Setttb */
cpufunc_faultstatus, /* Faultstatus */
cpufunc_faultaddress, /* Faultaddress */
/* TLB functions */
arm11_tlb_flushID, /* tlb_flushID */
arm11_tlb_flushID_SE, /* tlb_flushID_SE */
arm11_tlb_flushI, /* tlb_flushI */
arm11_tlb_flushI_SE, /* tlb_flushI_SE */
arm11_tlb_flushD, /* tlb_flushD */
arm11_tlb_flushD_SE, /* tlb_flushD_SE */
/* Cache operations */
armv6_icache_sync_all, /* icache_sync_all */
pj4b_icache_sync_range, /* icache_sync_range */
armv6_dcache_wbinv_all, /* dcache_wbinv_all */
pj4b_dcache_wbinv_range, /* dcache_wbinv_range */
pj4b_dcache_inv_range, /* dcache_inv_range */
pj4b_dcache_wb_range, /* dcache_wb_range */
armv6_idcache_wbinv_all, /* idcache_wbinv_all */
pj4b_idcache_wbinv_range, /* idcache_wbinv_range */
(void *)cpufunc_nullop, /* l2cache_wbinv_all */
(void *)cpufunc_nullop, /* l2cache_wbinv_range */
(void *)cpufunc_nullop, /* l2cache_inv_range */
(void *)cpufunc_nullop, /* l2cache_wb_range */
/* Other functions */
pj4b_drain_readbuf, /* flush_prefetchbuf */
arm11_drain_writebuf, /* drain_writebuf */
pj4b_flush_brnchtgt_all, /* flush_brnchtgt_C */
pj4b_flush_brnchtgt_va, /* flush_brnchtgt_E */
(void *)cpufunc_nullop, /* sleep */
/* Soft functions */
cpufunc_null_fixup, /* dataabt_fixup */
cpufunc_null_fixup, /* prefetchabt_fixup */
arm11_context_switch, /* context_switch */
pj4bv6_setup /* cpu setup */
};
#endif /* CPU_MV_PJ4B */
#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
/* CPU functions */
@ -844,6 +968,70 @@ struct cpu_functions fa526_cpufuncs = {
};
#endif /* CPU_FA526 || CPU_FA626TE */
#if defined(CPU_CORTEXA)
struct cpu_functions cortexa_cpufuncs = {
/* CPU functions */
cpufunc_id, /* id */
cpufunc_nullop, /* cpwait */
/* MMU functions */
cpufunc_control, /* control */
cpufunc_domains, /* Domain */
armv7_setttb, /* Setttb */
cpufunc_faultstatus, /* Faultstatus */
cpufunc_faultaddress, /* Faultaddress */
/* TLB functions */
arm11_tlb_flushID, /* tlb_flushID */
armv7_tlb_flushID_SE, /* tlb_flushID_SE */
arm11_tlb_flushI, /* tlb_flushI */
arm11_tlb_flushI_SE, /* tlb_flushI_SE */
arm11_tlb_flushD, /* tlb_flushD */
arm11_tlb_flushD_SE, /* tlb_flushD_SE */
/* Cache operations */
armv7_idcache_wbinv_all, /* icache_sync_all */
armv7_icache_sync_range, /* icache_sync_range */
armv7_dcache_wbinv_all, /* dcache_wbinv_all */
armv7_dcache_wbinv_range, /* dcache_wbinv_range */
armv7_dcache_inv_range, /* dcache_inv_range */
armv7_dcache_wb_range, /* dcache_wb_range */
armv7_idcache_wbinv_all, /* idcache_wbinv_all */
armv7_idcache_wbinv_range, /* idcache_wbinv_range */
/* Note: From OMAP4 the L2 ops are filled in when the
* L2 cache controller is actually enabled.
*/
cpufunc_nullop, /* l2cache_wbinv_all */
(void *)cpufunc_nullop, /* l2cache_wbinv_range */
(void *)cpufunc_nullop, /* l2cache_inv_range */
(void *)cpufunc_nullop, /* l2cache_wb_range */
/* Other functions */
cpufunc_nullop, /* flush_prefetchbuf */
arm11_drain_writebuf, /* drain_writebuf */
cpufunc_nullop, /* flush_brnchtgt_C */
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
arm11_sleep, /* sleep */
/* Soft functions */
cpufunc_null_fixup, /* dataabt_fixup */
cpufunc_null_fixup, /* prefetchabt_fixup */
arm11_context_switch, /* context_switch */
cortexa_setup /* cpu setup */
};
#endif /* CPU_CORTEXA */
/*
* Global constants also used by locore.s
@ -854,11 +1042,12 @@ u_int cputype;
u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
defined (CPU_ARM9E) || defined (CPU_ARM10) || \
defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM11) || \
defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
defined(CPU_FA526) || defined(CPU_FA626TE) || \
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) || \
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
defined(CPU_CORTEXA)
static void get_cachetype_cp15(void);
@ -871,12 +1060,15 @@ static int arm_dcache_l2_linesize;
static void
get_cachetype_cp15()
{
u_int ctype, isize, dsize;
u_int ctype, isize, dsize, cpuid;
u_int clevel, csize, i, sel;
u_int multiplier;
u_char type;
__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
: "=r" (ctype));
cpuid = cpufunc_id();
/*
* ...and thus spake the ARM ARM:
*
@ -884,57 +1076,89 @@ get_cachetype_cp15()
* reserved ID register is encountered, the System Control
* processor returns the value of the main ID register.
*/
if (ctype == cpufunc_id())
if (ctype == cpuid)
goto out;
if ((ctype & CPU_CT_S) == 0)
arm_pcache_unified = 1;
/*
* If you want to know how this code works, go read the ARM ARM.
*/
arm_pcache_type = CPU_CT_CTYPE(ctype);
if (arm_pcache_unified == 0) {
isize = CPU_CT_ISIZE(ctype);
multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
if (isize & CPU_CT_xSIZE_M)
arm_picache_line_size = 0; /* not present */
else
arm_picache_ways = 1;
} else {
arm_picache_ways = multiplier <<
(CPU_CT_xSIZE_ASSOC(isize) - 1);
if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
: "=r" (clevel));
arm_cache_level = clevel;
arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
i = 0;
while ((type = (clevel & 0x7)) && i < 7) {
if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
type == CACHE_SEP_CACHE) {
sel = i << 1;
__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
: : "r" (sel));
__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
: "=r" (csize));
arm_cache_type[sel] = csize;
arm_dcache_align = 1 <<
(CPUV7_CT_xSIZE_LEN(csize) + 4);
arm_dcache_align_mask = arm_dcache_align - 1;
}
if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
sel = (i << 1) | 1;
__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
: : "r" (sel));
__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
: "=r" (csize));
arm_cache_type[sel] = csize;
}
i++;
clevel >>= 3;
}
arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
}
dsize = CPU_CT_DSIZE(ctype);
multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
if (dsize & CPU_CT_xSIZE_M)
arm_pdcache_line_size = 0; /* not present */
else
arm_pdcache_ways = 1;
} else {
arm_pdcache_ways = multiplier <<
(CPU_CT_xSIZE_ASSOC(dsize) - 1);
if ((ctype & CPU_CT_S) == 0)
arm_pcache_unified = 1;
/*
* If you want to know how this code works, go read the ARM ARM.
*/
arm_pcache_type = CPU_CT_CTYPE(ctype);
if (arm_pcache_unified == 0) {
isize = CPU_CT_ISIZE(ctype);
multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
if (isize & CPU_CT_xSIZE_M)
arm_picache_line_size = 0; /* not present */
else
arm_picache_ways = 1;
} else {
arm_picache_ways = multiplier <<
(CPU_CT_xSIZE_ASSOC(isize) - 1);
}
arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
}
dsize = CPU_CT_DSIZE(ctype);
multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
if (dsize & CPU_CT_xSIZE_M)
arm_pdcache_line_size = 0; /* not present */
else
arm_pdcache_ways = 1;
} else {
arm_pdcache_ways = multiplier <<
(CPU_CT_xSIZE_ASSOC(dsize) - 1);
}
arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
arm_dcache_align = arm_pdcache_line_size;
arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
out:
arm_dcache_align_mask = arm_dcache_align - 1;
}
arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
arm_dcache_align = arm_pdcache_line_size;
arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
out:
arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
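
In the ARMv7 branch above, each loop iteration selects a cache level/type via CSSELR (mcr p15, 2, ..., c0, c0, 0), reads its geometry from CCSIDR (mrc p15, 1, ..., c0, c0, 0), and stashes the raw word in arm_cache_type[]. A sketch of decoding one CCSIDR word, using the same field layout the code relies on (LineSize in bits [2:0], Associativity in [12:3], NumSets in [27:13]; helper name hypothetical):

static void
ccsidr_decode(u_int csize)
{
	u_int linesize, ways, sets;

	linesize = 1 << ((csize & 0x7) + 4);		/* bytes per line */
	ways = ((csize >> 3) & 0x3ff) + 1;
	sets = ((csize >> 13) & 0x7fff) + 1;
	printf("%u KB, %u-way, %u-byte lines\n",
	    (linesize * ways * sets) / 1024, ways, linesize);
}

This is the same arithmetic identify_arm_cpu() applies later when it prints per-level cache geometry from arm_cache_type[].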
@ -1049,40 +1273,32 @@ set_cpufuncs()
}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS ||
cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
cputype == CPU_ID_MV88FR571_41) {
if (cputype == CPU_ID_MV88FR131 ||
cputype == CPU_ID_MV88FR571_VD ||
cputype == CPU_ID_MV88FR571_41) {
uint32_t sheeva_ctrl;
cpufuncs = sheeva_cpufuncs;
/*
* Workaround for Marvell MV78100 CPU: Cache prefetch
* mechanism may affect the cache coherency validity,
* so it needs to be disabled.
*
* Refer to errata document MV-S501058-00C.pdf (p. 3.1
* L2 Prefetching Mechanism) for details.
*/
if (cputype == CPU_ID_MV88FR571_VD ||
cputype == CPU_ID_MV88FR571_41) {
sheeva_control_ext(0xffffffff,
FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN |
FC_L2_PREF_DIS);
} else {
sheeva_control_ext(0xffffffff,
FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN);
}
sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
MV_L2_ENABLE);
/*
* Workaround for Marvell MV78100 CPU: Cache prefetch
* mechanism may affect the cache coherency validity,
* so it needs to be disabled.
*
* Refer to errata document MV-S501058-00C.pdf (p. 3.1
* L2 Prefetching Mechanism) for details.
*/
if (cputype == CPU_ID_MV88FR571_VD ||
cputype == CPU_ID_MV88FR571_41)
sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
/* Use powersave on this CPU. */
cpu_do_powersave = 1;
} else
cpufuncs = armv5_ec_cpufuncs;
sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
cpufuncs = sheeva_cpufuncs;
get_cachetype_cp15();
pmap_pte_init_generic();
goto out;
} else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
cpufuncs = armv5_ec_cpufuncs;
get_cachetype_cp15();
pmap_pte_init_generic();
goto out;
@ -1108,6 +1324,45 @@ set_cpufuncs()
goto out;
}
#endif /* CPU_ARM10 */
#ifdef CPU_CORTEXA
if (cputype == CPU_ID_CORTEXA8R1 ||
cputype == CPU_ID_CORTEXA8R2 ||
cputype == CPU_ID_CORTEXA8R3 ||
cputype == CPU_ID_CORTEXA9R1 ||
cputype == CPU_ID_CORTEXA9R2) {
cpufuncs = cortexa_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
get_cachetype_cp15();
pmap_pte_init_mmu_v6();
/* Use powersave on this CPU. */
cpu_do_powersave = 1;
goto out;
}
#endif /* CPU_CORTEXA */
#if defined(CPU_MV_PJ4B)
if (cputype == CPU_ID_MV88SV581X_V6 ||
cputype == CPU_ID_MV88SV581X_V7 ||
cputype == CPU_ID_ARM_88SV581X_V6 ||
cputype == CPU_ID_ARM_88SV581X_V7) {
if (cpu_pfr(0) & ARM_PFR0_THUMBEE_MASK)
cpufuncs = pj4bv7_cpufuncs;
else
cpufuncs = pj4bv6_cpufuncs;
get_cachetype_cp15();
pmap_pte_init_mmu_v6();
goto out;
} else if (cputype == CPU_ID_ARM_88SV584X ||
cputype == CPU_ID_MV88SV584X) {
cpufuncs = pj4bv6_cpufuncs;
get_cachetype_cp15();
pmap_pte_init_mmu_v6();
goto out;
}
#endif /* CPU_MV_PJ4B */
#ifdef CPU_SA110
if (cputype == CPU_ID_SA110) {
cpufuncs = sa110_cpufuncs;
@ -1970,7 +2225,6 @@ arm11_setup(args)
__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
/* Set the control register */
curcpu()->ci_ctrl = cpuctrl;
cpu_control(0xffffffff, cpuctrl);
/* And again. */
@ -1978,6 +2232,126 @@ arm11_setup(args)
}
#endif /* CPU_ARM11 */
#ifdef CPU_MV_PJ4B
void
pj4bv6_setup(char *args)
{
int cpuctrl;
pj4b_config();
cpuctrl = CPU_CONTROL_MMU_ENABLE;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
cpuctrl |= CPU_CONTROL_DC_ENABLE;
cpuctrl |= (0xf << 3);
#ifdef __ARMEB__
cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
cpuctrl |= CPU_CONTROL_SYST_ENABLE;
cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
cpuctrl |= CPU_CONTROL_IC_ENABLE;
if (vector_page == ARM_VECTORS_HIGH)
cpuctrl |= CPU_CONTROL_VECRELOC;
cpuctrl |= (0x5 << 16);
cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
/* XXX not yet */
/* cpuctrl |= CPU_CONTROL_L2_ENABLE; */
/* Make sure caches are clean. */
cpu_idcache_wbinv_all();
cpu_l2cache_wbinv_all();
/* Set the control register */
ctrl = cpuctrl;
cpu_control(0xffffffff, cpuctrl);
cpu_idcache_wbinv_all();
cpu_l2cache_wbinv_all();
}
void
pj4bv7_setup(args)
char *args;
{
int cpuctrl;
pj4b_config();
cpuctrl = CPU_CONTROL_MMU_ENABLE;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
cpuctrl |= CPU_CONTROL_DC_ENABLE;
cpuctrl |= (0xf << 3);
cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
cpuctrl |= CPU_CONTROL_IC_ENABLE;
if (vector_page == ARM_VECTORS_HIGH)
cpuctrl |= CPU_CONTROL_VECRELOC;
cpuctrl |= (0x5 << 16) | (1 << 22);
cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
/* Clear out the cache */
cpu_idcache_wbinv_all();
/* Set the control register */
ctrl = cpuctrl;
cpu_control(0xFFFFFFFF, cpuctrl);
/* And again. */
cpu_idcache_wbinv_all();
}
#endif /* CPU_MV_PJ4B */
#ifdef CPU_CORTEXA
void
cortexa_setup(char *args)
{
int cpuctrl, cpuctrlmask;
cpuctrlmask = CPU_CONTROL_MMU_ENABLE | /* MMU enable [0] */
CPU_CONTROL_AFLT_ENABLE | /* Alignment fault [1] */
CPU_CONTROL_DC_ENABLE | /* DCache enable [2] */
CPU_CONTROL_BPRD_ENABLE | /* Branch prediction [11] */
CPU_CONTROL_IC_ENABLE | /* ICache enable [12] */
CPU_CONTROL_VECRELOC; /* Vector relocation [13] */
cpuctrl = CPU_CONTROL_MMU_ENABLE |
CPU_CONTROL_IC_ENABLE |
CPU_CONTROL_DC_ENABLE |
CPU_CONTROL_BPRD_ENABLE;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
/* Switch to big endian */
#ifdef __ARMEB__
cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
/* Check if the vector page is at the high address (0xffff0000) */
if (vector_page == ARM_VECTORS_HIGH)
cpuctrl |= CPU_CONTROL_VECRELOC;
/* Clear out the cache */
cpu_idcache_wbinv_all();
/* Set the control register */
ctrl = cpuctrl;
cpu_control(cpuctrlmask, cpuctrl);
/* And again. */
cpu_idcache_wbinv_all();
#ifdef SMP
armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting */
#endif
}
#endif /* CPU_CORTEXA */
#ifdef CPU_SA110
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12

View File

@ -65,6 +65,10 @@ ENTRY(cpufunc_id)
mrc p15, 0, r0, c0, c0, 0
RET
ENTRY(cpufunc_cpuid)
mrc p15, 0, r0, c0, c0, 0
RET
ENTRY(cpu_get_control)
mrc p15, 0, r0, c1, c0, 0
RET

View File

@ -122,3 +122,8 @@ ENTRY(arm11_tlb_flushD_SE)
ENTRY(arm11_drain_writebuf)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
ENTRY_NP(arm11_sleep)
mov r0, #0
mcr p15, 0, r0, c7, c0, 4 /* wait for interrupt */
RET

View File

@ -0,0 +1,277 @@
/*-
* Copyright (C) 2011 MARVELL INTERNATIONAL LTD.
* All rights reserved.
*
* Developed by Semihalf.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of MARVELL nor the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
.cpu cortex-a8
.Lcoherency_level:
.word _C_LABEL(arm_cache_loc)
.Lcache_type:
.word _C_LABEL(arm_cache_type)
.Lway_mask:
.word 0x3ff
.Lmax_index:
.word 0x7fff
.Lpage_mask:
.word 0xfff
#define PT_NOS (1 << 5)
#define PT_S (1 << 1)
#define PT_INNER_NC 0
#define PT_INNER_WT (1 << 0)
#define PT_INNER_WB ((1 << 0) | (1 << 6))
#define PT_INNER_WBWA (1 << 6)
#define PT_OUTER_NC 0
#define PT_OUTER_WT (2 << 3)
#define PT_OUTER_WB (3 << 3)
#define PT_OUTER_WBWA (1 << 3)
#ifdef SMP
#define PT_ATTR (PT_S|PT_INNER_WT|PT_OUTER_WT|PT_NOS)
#else
#define PT_ATTR (PT_INNER_WT|PT_OUTER_WT)
#endif
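/*
 * These PT_* bits live in the low bits of TTBR0 and describe how the
 * hardware table walk itself is cached and shared (ARMv7 TTBR format):
 * bit 1 = Shareable, bit 5 = Not-Outer-Shareable, bits [4:3] = outer
 * cacheability (RGN), and bits 0 and 6 = the split inner-cacheability
 * (IRGN) encoding.
 */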
ENTRY(armv7_setttb)
stmdb sp!, {r0, lr}
bl _C_LABEL(armv7_idcache_wbinv_all) /* clean the D cache */
ldmia sp!, {r0, lr}
dsb
orr r0, r0, #PT_ATTR
mcr p15, 0, r0, c2, c0, 0 /* Translation Table Base Register 0 (TTBR0) */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
dsb
isb
RET
ENTRY(armv7_tlb_flushID)
dsb
#ifdef SMP
mcr p15, 0, r0, c8, c3, 0
#else
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
#endif
mcr p15, 0, r0, c7, c5, 6 /* flush BTB */
dsb
isb
mov pc, lr
ENTRY(armv7_tlb_flushID_SE)
ldr r1, .Lpage_mask
bic r0, r0, r1
#ifdef SMP
mcr p15, 0, r0, c8, c3, 1 /* flush D tlb single entry */
#else
mcr p15, 0, r0, c8, c7, 1 /* flush D tlb single entry */
#endif
mcr p15, 0, r0, c7, c5, 6 /* flush BTB */
dsb
isb
mov pc, lr
/* Based on algorithm from ARM Architecture Reference Manual */
ENTRY(armv7_dcache_wbinv_all)
stmdb sp!, {r4, r5, r6, r7, r8, r9}
/* Get cache level */
ldr r0, .Lcoherency_level
ldr r3, [r0]
cmp r3, #0
beq Finished
/* For each cache level */
mov r8, #0
Loop1:
/* Get cache type for given level */
mov r2, r8, lsl #2
add r2, r2, r2
ldr r0, .Lcache_type
ldr r1, [r0, r2]
/* Get line size */
and r2, r1, #7
add r2, r2, #4
/* Get number of ways */
ldr r4, .Lway_mask
ands r4, r4, r1, lsr #3
clz r5, r4
/* Get max index */
ldr r7, .Lmax_index
ands r7, r7, r1, lsr #13
Loop2:
mov r9, r4
Loop3:
mov r6, r8, lsl #1
orr r6, r6, r9, lsl r5
orr r6, r6, r7, lsl r2
/* Clean and invalidate data cache by way/index */
mcr p15, 0, r6, c7, c14, 2
subs r9, r9, #1
bge Loop3
subs r7, r7, #1
bge Loop2
Skip:
add r8, r8, #1
cmp r3, r8
bne Loop1
Finished:
dsb
ldmia sp!, {r4, r5, r6, r7, r8, r9}
RET
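/*
 * The operand built in r6 above follows the ARMv7 ARM set/way format
 * for DCCISW: cache level in bits [3:1] (r8 << 1), way index in the
 * top bits (r9 shifted left by r5 = clz(ways - 1)), and set index
 * shifted left by log2(line size) (r7 << r2).
 */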
ENTRY(armv7_idcache_wbinv_all)
stmdb sp!, {lr}
bl armv7_dcache_wbinv_all
mcr p15, 0, r0, c7, c5, 0 /* Invalidate all I caches to PoU (ICIALLU) */
dsb
isb
ldmia sp!, {lr}
RET
/* XXX Temporarily set to 32 for MV cores; this value should really
 * be read from the Cache Type register.
 */
.Larmv7_line_size:
.word 32
ENTRY(armv7_dcache_wb_range)
ldr ip, .Larmv7_line_size
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
.Larmv7_wb_next:
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bhi .Larmv7_wb_next
dsb /* data synchronization barrier */
RET
ENTRY(armv7_dcache_wbinv_range)
ldr ip, .Larmv7_line_size
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
.Larmv7_wbinv_next:
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bhi .Larmv7_wbinv_next
dsb /* data synchronization barrier */
RET
/*
* Note, we must not invalidate everything. If the range is too big we
* must use wb-inv of the entire cache.
*/
ENTRY(armv7_dcache_inv_range)
ldr ip, .Larmv7_line_size
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
.Larmv7_inv_next:
mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bhi .Larmv7_inv_next
dsb /* data synchronization barrier */
RET
ENTRY(armv7_idcache_wbinv_range)
ldr ip, .Larmv7_line_size
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
.Larmv7_id_wbinv_next:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bhi .Larmv7_id_wbinv_next
isb /* instruction synchronization barrier */
dsb /* data synchronization barrier */
RET
ENTRY_NP(armv7_icache_sync_range)
ldr ip, .Larmv7_line_size
.Larmv7_sync_next:
mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
add r0, r0, ip
subs r1, r1, ip
bhi .Larmv7_sync_next
isb /* instruction synchronization barrier */
dsb /* data synchronization barrier */
RET
ENTRY(armv7_cpu_sleep)
dsb /* data synchronization barrier */
wfi /* wait for interrupt */
RET
ENTRY(armv7_context_switch)
dsb
orr r0, r0, #PT_ATTR
mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */
mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */
dsb
isb
RET
ENTRY(armv7_drain_writebuf)
dsb
RET
ENTRY(armv7_sev)
dsb
sev
nop
RET
ENTRY(armv7_auxctrl)
mrc p15, 0, r2, c1, c0, 1
bic r3, r2, r0 /* Clear bits */
eor r3, r3, r1 /* XOR bits */
teq r2, r3
mcrne p15, 0, r3, c1, c0, 1
mov r0, r2
RET
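
armv7_auxctrl() above is a read-modify-write of the Auxiliary Control Register: the first argument masks bits off, the second is then XORed in (which sets them, since they were just cleared), the write is skipped when nothing changes, and the previous value is returned. A C equivalent (sketch, hypothetical name):

static u_int
auxctrl_rmw(u_int clr, u_int set)
{
	u_int old, new;

	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (old));
	new = (old & ~clr) ^ set;
	if (new != old)
		__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (new));
	return (old);
}

cortexa_setup() uses it with clr == set == (1 << 6) | (1 << 0) to turn on ACTLR.SMP and maintenance-broadcast without disturbing other bits.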

View File

@ -0,0 +1,202 @@
/*-
* Copyright (C) 2011 MARVELL INTERNATIONAL LTD.
* All rights reserved.
*
* Developed by Semihalf.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of MARVELL nor the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
#include <machine/param.h>
.Lpj4b_cache_line_size:
.word _C_LABEL(arm_pdcache_line_size)
ENTRY(pj4b_setttb)
/* Cache synchronization is not required as this core has PIPT caches */
mcr p15, 0, r1, c7, c10, 4 /* drain the write buffer */
#ifdef SMP
orr r0, r0, #2 /* Set TTB shared memory flag */
#endif
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
ENTRY_NP(armv6_icache_sync_all)
/*
* We assume that the code here can never be out of sync with the
* dcache, so that we can safely flush the Icache and fall through
* into the Dcache cleaning code.
*/
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 /* Invalidate ICache */
mcr p15, 0, r0, c7, c10, 0 /* Clean (don't invalidate) DCache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(pj4b_icache_sync_range)
sub r1, r1, #1
add r1, r0, r1
mcrr p15, 0, r1, r0, c5 /* invalidate IC range */
mcrr p15, 0, r1, r0, c12 /* clean DC range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(pj4b_dcache_inv_range)
ldr ip, .Lpj4b_cache_line_size
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
mcr p15, 0, r0, c7, c10, 5 /* Data Memory Barrier err:4413 */
1:
mcr p15, 0, r0, c7, c6, 1
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(armv6_idcache_wbinv_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 /* invalidate ICache */
mcr p15, 0, r0, c7, c14, 0 /* clean and invalidate DCache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(armv6_dcache_wbinv_all)
mov r0, #0
mcr p15, 0, r0, c7, c14, 0 /* clean and invalidate DCache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(pj4b_idcache_wbinv_range)
ldr ip, .Lpj4b_cache_line_size
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
mcr p15, 0, r0, c7, c10, 5 /* Data Memory Barrier err:4611 */
1:
#ifdef SMP
/* Request for ownership */
ldr r2, [r0]
str r2, [r0]
#endif
mcr p15, 0, r0, c7, c5, 1
mcr p15, 0, r0, c7, c14, 1 /* L2C clean and invalidate entry */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(pj4b_dcache_wbinv_range)
ldr ip, .Lpj4b_cache_line_size
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
mcr p15, 0, r0, c7, c10, 5 /* Data Memory Barrier err:4611 */
1:
#ifdef SMP
/* Request for ownership */
ldr r2, [r0]
str r2, [r0]
#endif
mcr p15, 0, r0, c7, c14, 1
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(pj4b_dcache_wb_range)
ldr ip, .Lpj4b_cache_line_size
ldr ip, [ip]
sub r1, r1, #1 /* Don't overrun */
sub r3, ip, #1
and r2, r0, r3
add r1, r1, r2
bic r0, r0, r3
mcr p15, 0, r0, c7, c10, 5 /* Data Memory Barrier err:4611 */
1:
#ifdef SMP
/* Request for ownership */
ldr r2, [r0]
str r2, [r0]
#endif
mcr p15, 0, r0, c7, c10, 1 /* L2C clean single entry by MVA */
add r0, r0, ip
subs r1, r1, ip
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
ENTRY(pj4b_drain_readbuf)
mcr p15, 0, r0, c7, c5, 4 /* flush prefetch buffers */
RET
ENTRY(pj4b_flush_brnchtgt_all)
mcr p15, 0, r0, c7, c5, 6 /* flush entire branch target cache */
RET
ENTRY(pj4b_flush_brnchtgt_va)
mcr p15, 0, r0, c7, c5, 7 /* flush branch target cache by VA */
RET
ENTRY(get_core_id)
mrc p15, 0, r0, c0, c0, 5
RET
ENTRY(pj4b_config)
/* Set Auxiliary Debug Modes Control 2 register */
mrc p15, 1, r0, c15, c1, 2
bic r0, r0, #(1 << 23)
orr r0, r0, #(1 << 25)
orr r0, r0, #(1 << 27)
orr r0, r0, #(1 << 29)
orr r0, r0, #(1 << 30)
mcr p15, 1, r0, c15, c1, 2
#if defined(SMP)
/* Set SMP mode in Auxiliary Control Register */
mrc p15, 0, r0, c1, c0, 1
orr r0, r0, #(1 << 5)
mcr p15, 0, r0, c1, c0, 1
#endif
RET

View File

@ -72,15 +72,25 @@ void __startC(void);
#define cpu_idcache_wbinv_all xscale_cache_purgeID
#elif defined(CPU_XSCALE_81342)
#define cpu_idcache_wbinv_all xscalec3_cache_purgeID
#elif defined(CPU_MV_PJ4B)
#if !defined(SOC_MV_ARMADAXP)
#define cpu_idcache_wbinv_all armv6_idcache_wbinv_all
#else
#define cpu_idcache_wbinv_all() armadaxp_idcache_wbinv_all
#endif
#endif /* CPU_MV_PJ4B */
#ifdef CPU_XSCALE_81342
#define cpu_l2cache_wbinv_all xscalec3_l2cache_purge
#elif defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY)
#define cpu_l2cache_wbinv_all sheeva_l2cache_wbinv_all
#elif defined(CPU_CORTEXA)
#define cpu_idcache_wbinv_all armv7_idcache_wbinv_all
#define cpu_l2cache_wbinv_all()
#else
#define cpu_l2cache_wbinv_all()
#endif
static void armadaxp_idcache_wbinv_all(void);
int arm_picache_size;
int arm_picache_line_size;
@ -96,6 +106,10 @@ int arm_pcache_unified;
int arm_dcache_align;
int arm_dcache_align_mask;
u_int arm_cache_level;
u_int arm_cache_type[14];
u_int arm_cache_loc;
/* Additional cache information local to this file. Log2 of some of the
above numbers. */
static int arm_dcache_l2_nsets;
@ -221,8 +235,6 @@ _startC(void)
if ((cpufunc_id() & 0x0000f000) == 0x00009000)
arm9_setup();
#endif
cpu_idcache_wbinv_all();
cpu_l2cache_wbinv_all();
#endif
__start();
}
@ -230,68 +242,102 @@ _startC(void)
static void
get_cachetype_cp15()
{
u_int ctype, isize, dsize;
u_int ctype, isize, dsize, cpuid;
u_int clevel, csize, i, sel;
u_int multiplier;
u_char type;
__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
: "=r" (ctype));
: "=r" (ctype));
cpuid = cpufunc_id();
/*
* ...and thus spake the ARM ARM:
*
* If an <opcode2> value corresponding to an unimplemented or
* reserved ID register is encountered, the System Control
* processor returns the value of the main ID register.
*/
if (ctype == cpufunc_id())
if (ctype == cpuid)
goto out;
if ((ctype & CPU_CT_S) == 0)
arm_pcache_unified = 1;
/*
* If you want to know how this code works, go read the ARM ARM.
*/
arm_pcache_type = CPU_CT_CTYPE(ctype);
if (arm_pcache_unified == 0) {
isize = CPU_CT_ISIZE(ctype);
multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
if (isize & CPU_CT_xSIZE_M)
arm_picache_line_size = 0; /* not present */
else
arm_picache_ways = 1;
} else {
arm_picache_ways = multiplier <<
(CPU_CT_xSIZE_ASSOC(isize) - 1);
if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
: "=r" (clevel));
arm_cache_level = clevel;
arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level) + 1;
i = 0;
while ((type = (clevel & 0x7)) && i < 7) {
if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
type == CACHE_SEP_CACHE) {
sel = i << 1;
__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
: : "r" (sel));
__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
: "=r" (csize));
arm_cache_type[sel] = csize;
}
if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
sel = (i << 1) | 1;
__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
: : "r" (sel));
__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
: "=r" (csize));
arm_cache_type[sel] = csize;
}
i++;
clevel >>= 3;
}
arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
}
dsize = CPU_CT_DSIZE(ctype);
multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
if (dsize & CPU_CT_xSIZE_M)
arm_pdcache_line_size = 0; /* not present */
else
arm_pdcache_ways = 1;
} else {
arm_pdcache_ways = multiplier <<
(CPU_CT_xSIZE_ASSOC(dsize) - 1);
if ((ctype & CPU_CT_S) == 0)
arm_pcache_unified = 1;
/*
* If you want to know how this code works, go read the ARM ARM.
*/
arm_pcache_type = CPU_CT_CTYPE(ctype);
if (arm_pcache_unified == 0) {
isize = CPU_CT_ISIZE(ctype);
multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
if (isize & CPU_CT_xSIZE_M)
arm_picache_line_size = 0; /* not present */
else
arm_picache_ways = 1;
} else {
arm_picache_ways = multiplier <<
(CPU_CT_xSIZE_ASSOC(isize) - 1);
}
arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
}
dsize = CPU_CT_DSIZE(ctype);
multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
if (dsize & CPU_CT_xSIZE_M)
arm_pdcache_line_size = 0; /* not present */
else
arm_pdcache_ways = 1;
} else {
arm_pdcache_ways = multiplier <<
(CPU_CT_xSIZE_ASSOC(dsize) - 1);
}
arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
arm_dcache_align = arm_pdcache_line_size;
arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
out:
arm_dcache_align_mask = arm_dcache_align - 1;
}
arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
arm_dcache_align = arm_pdcache_line_size;
arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
out:
arm_dcache_align_mask = arm_dcache_align - 1;
}
static void
@ -306,7 +352,18 @@ arm9_setup(void)
arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
}
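/*
 * Same trick as the PJ4B case in set_cpufuncs(): the ThumbEE bit in
 * ID_PFR0 serves as a proxy for "ARMv7-class core" when picking the
 * cache flush routine.
 */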
static void
armadaxp_idcache_wbinv_all(void)
{
uint32_t feat;
__asm __volatile("mrc p15, 0, %0, c0, c1, 0" : "=r" (feat));
if (feat & ARM_PFR0_THUMBEE_MASK)
armv7_idcache_wbinv_all();
else
armv6_idcache_wbinv_all();
}
#ifdef KZIP
static unsigned char *orig_input, *i_input, *i_output;

View File

@ -39,12 +39,15 @@
#include "assym.s"
__FBSDID("$FreeBSD$");
#ifdef MULTIPROCESSOR
.Lcpu_info:
.word _C_LABEL(cpu_info)
#ifdef _ARM_ARCH_6
#define GET_PCB(tmp) \
mrc p15, 0, tmp, c13, c0, 4; \
add tmp, tmp, #(PC_CURPCB)
#else
.Lcurpcb:
.word _C_LABEL(__pcpu) + PC_CURPCB
#define GET_PCB(tmp) \
ldr tmp, .Lcurpcb
#endif
/*
@ -54,18 +57,8 @@ __FBSDID("$FreeBSD$");
ENTRY_NP(casuword32)
ENTRY(casuword)
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0, r14}
bl _C_LABEL(cpu_number)
ldr r2, .Lcpu_info
ldr r2, [r2, r0, lsl #2]
ldr r2, [r2, #CI_CURPCB]
ldmfd sp!, {r0, r14}
#else
ldr r3, .Lcurpcb
GET_PCB(r3)
ldr r3, [r3]
#endif
#ifdef DIAGNOSTIC
teq r3, #0x00000000
@ -101,18 +94,8 @@ ENTRY(casuword)
ENTRY_NP(fuword32)
ENTRY(fuword)
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0, r14}
bl _C_LABEL(cpu_number)
ldr r2, .Lcpu_info
ldr r2, [r2, r0, lsl #2]
ldr r2, [r2, #CI_CURPCB]
ldmfd sp!, {r0, r14}
#else
ldr r2, .Lcurpcb
GET_PCB(r2)
ldr r2, [r2]
#endif
#ifdef DIAGNOSTIC
teq r2, #0x00000000
@ -135,18 +118,8 @@ ENTRY(fuword)
*/
ENTRY(fusword)
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0, r14}
bl _C_LABEL(cpu_number)
ldr r2, .Lcpu_info
ldr r2, [r2, r0, lsl #2]
ldr r2, [r2, #CI_CURPCB]
ldmfd sp!, {r0, r14}
#else
ldr r2, .Lcurpcb
GET_PCB(r2)
ldr r2, [r2]
#endif
#ifdef DIAGNOSTIC
teq r2, #0x00000000
@ -180,18 +153,8 @@ ENTRY(fuswintr)
mvnne r0, #0x00000000
RETne
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0, r14}
bl _C_LABEL(cpu_number)
ldr r2, .Lcpu_info
ldr r2, [r2, r0, lsl #2]
ldr r2, [r2, #CI_CURPCB]
ldmfd sp!, {r0, r14}
#else
ldr r2, .Lcurpcb
GET_PCB(r2)
ldr r2, [r2]
#endif
#ifdef DIAGNOSTIC
teq r2, #0x00000000
@ -229,18 +192,8 @@ _C_LABEL(block_userspace_access):
*/
ENTRY(fubyte)
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0, r14}
bl _C_LABEL(cpu_number)
ldr r2, .Lcpu_info
ldr r2, [r2, r0, lsl #2]
ldr r2, [r2, #CI_CURPCB]
ldmfd sp!, {r0, r14}
#else
ldr r2, .Lcurpcb
GET_PCB(r2)
ldr r2, [r2]
#endif
#ifdef DIAGNOSTIC
teq r2, #0x00000000
@ -303,18 +256,8 @@ fusupcbfaulttext:
ENTRY_NP(suword32)
ENTRY(suword)
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
stmfd sp!, {r0, r1, r14}
bl _C_LABEL(cpu_number)
ldr r2, .Lcpu_info
ldr r2, [r2, r0, lsl #2]
ldr r2, [r2, #CI_CURPCB]
ldmfd sp!, {r0, r1, r14}
#else
ldr r2, .Lcurpcb
GET_PCB(r2)
ldr r2, [r2]
#endif
#ifdef DIAGNOSTIC
teq r2, #0x00000000
@ -343,17 +286,8 @@ ENTRY(suswintr)
mvnne r0, #0x00000000
RETne
#ifdef MULTIPROCESSOR
stmfd sp!, {r0, r1, r14}
bl _C_LABEL(cpu_number)
ldr r2, .Lcpu_info
ldr r2, [r2, r0, lsl #2]
ldr r2, [r2, #CI_CURPCB]
ldmfd sp!, {r0, r1, r14}
#else
ldr r2, .Lcurpcb
GET_PCB(r2)
ldr r2, [r2]
#endif
#ifdef DIAGNOSTIC
teq r2, #0x00000000
@ -382,17 +316,8 @@ ENTRY(suswintr)
*/
ENTRY(susword)
#ifdef MULTIPROCESSOR
stmfd sp!, {r0, r1, r14}
bl _C_LABEL(cpu_number)
ldr r2, .Lcpu_info
ldr r2, [r2, r0, lsl #2]
ldr r2, [r2, #CI_CURPCB]
ldmfd sp!, {r0, r1, r14}
#else
ldr r2, .Lcurpcb
GET_PCB(r2)
ldr r2, [r2]
#endif
#ifdef DIAGNOSTIC
teq r2, #0x00000000
@ -421,17 +346,8 @@ ENTRY(susword)
*/
ENTRY(subyte)
#ifdef MULTIPROCESSOR
stmfd sp!, {r0, r1, r14}
bl _C_LABEL(cpu_number)
ldr r2, .Lcpu_info
ldr r2, [r2, r0, lsl #2]
ldr r2, [r2, #CI_CURPCB]
ldmfd sp!, {r0, r1, r14}
#else
ldr r2, .Lcurpcb
GET_PCB(r2)
ldr r2, [r2]
#endif
#ifdef DIAGNOSTIC

View File

@ -34,7 +34,9 @@ __FBSDID("$FreeBSD$");
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <machine/vmparam.h>
#include <machine/armreg.h>
#include <machine/pcb.h>
@ -105,9 +107,22 @@ ASSYM(TF_PC, offsetof(struct trapframe, tf_pc));
ASSYM(P_PID, offsetof(struct proc, p_pid));
ASSYM(P_FLAG, offsetof(struct proc, p_flag));
#ifdef ARM_TP_ADDRESS
ASSYM(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
ASSYM(ARM_RAS_START, ARM_RAS_START);
ASSYM(ARM_RAS_END, ARM_RAS_END);
#endif
#ifdef ARM_VFP_SUPPORT
ASSYM(PCB_VFPSTATE, offsetof(struct pcb, pcb_vfpstate));
ASSYM(PCB_VFPCPU, offsetof(struct pcb, pcb_vfpcpu));
ASSYM(PC_VFPCTHREAD, offsetof(struct pcpu, pc_vfpcthread));
ASSYM(PC_CPU, offsetof(struct pcpu, pc_cpu));
ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap));
#endif
ASSYM(PAGE_SIZE, PAGE_SIZE);
ASSYM(PDESIZE, PDESIZE);
ASSYM(PMAP_DOMAIN_KERNEL, PMAP_DOMAIN_KERNEL);

sys/arm/arm/gic.c (new file, 307 lines)
View File

@ -0,0 +1,307 @@
/*-
* Copyright (c) 2011 The FreeBSD Foundation
* All rights reserved.
*
* Developed by Damjan Marion <damjan.marion@gmail.com>
*
* Based on OMAP4 GIC code by Ben Gray
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/smp.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
/* We are using GICv2 register naming */
/* Distributor Registers */
#define GICD_CTLR 0x000 /* v1 ICDDCR */
#define GICD_TYPER 0x004 /* v1 ICDICTR */
#define GICD_IIDR 0x008 /* v1 ICDIIDR */
#define GICD_IGROUPR(n) (0x0080 + ((n) * 4)) /* v1 ICDISER */
#define GICD_ISENABLER(n) (0x0100 + ((n) * 4)) /* v1 ICDISER */
#define GICD_ICENABLER(n) (0x0180 + ((n) * 4)) /* v1 ICDICER */
#define GICD_ISPENDR(n) (0x0200 + ((n) * 4)) /* v1 ICDISPR */
#define GICD_ICPENDR(n) (0x0280 + ((n) * 4)) /* v1 ICDICPR */
#define GICD_ICACTIVER(n) (0x0380 + ((n) * 4)) /* v1 ICDABR */
#define GICD_IPRIORITYR(n) (0x0400 + ((n) * 4)) /* v1 ICDIPR */
#define GICD_ITARGETSR(n) (0x0800 + ((n) * 4)) /* v1 ICDIPTR */
#define GICD_ICFGR(n) (0x0C00 + ((n) * 4)) /* v1 ICDICFR */
#define GICD_SGIR(n) (0x0F00 + ((n) * 4)) /* v1 ICDSGIR */
/* CPU Registers */
#define GICC_CTLR 0x0000 /* v1 ICCICR */
#define GICC_PMR 0x0004 /* v1 ICCPMR */
#define GICC_BPR 0x0008 /* v1 ICCBPR */
#define GICC_IAR 0x000C /* v1 ICCIAR */
#define GICC_EOIR 0x0010 /* v1 ICCEOIR */
#define GICC_RPR 0x0014 /* v1 ICCRPR */
#define GICC_HPPIR 0x0018 /* v1 ICCHPIR */
#define GICC_ABPR 0x001C /* v1 ICCABPR */
#define GICC_IIDR 0x00FC /* v1 ICCIIDR */
struct arm_gic_softc {
struct resource * gic_res[3];
bus_space_tag_t gic_c_bst;
bus_space_tag_t gic_d_bst;
bus_space_handle_t gic_c_bsh;
bus_space_handle_t gic_d_bsh;
uint8_t ver;
};
static struct resource_spec arm_gic_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Distributor registers */
{ SYS_RES_MEMORY, 1, RF_ACTIVE }, /* CPU Interrupt Intf. registers */
{ -1, 0 }
};
static struct arm_gic_softc *arm_gic_sc = NULL;
#define gic_c_read_4(reg) \
bus_space_read_4(arm_gic_sc->gic_c_bst, arm_gic_sc->gic_c_bsh, reg)
#define gic_c_write_4(reg, val) \
bus_space_write_4(arm_gic_sc->gic_c_bst, arm_gic_sc->gic_c_bsh, reg, val)
#define gic_d_read_4(reg) \
bus_space_read_4(arm_gic_sc->gic_d_bst, arm_gic_sc->gic_d_bsh, reg)
#define gic_d_write_4(reg, val) \
bus_space_write_4(arm_gic_sc->gic_d_bst, arm_gic_sc->gic_d_bsh, reg, val)
static void gic_post_filter(void *);
static int
arm_gic_probe(device_t dev)
{
if (!ofw_bus_is_compatible(dev, "arm,gic"))
return (ENXIO);
device_set_desc(dev, "ARM Generic Interrupt Controller");
return (BUS_PROBE_DEFAULT);
}
void
gic_init_secondary(void)
{
int nirqs;
/* Get the number of interrupts */
nirqs = gic_d_read_4(GICD_TYPER);
nirqs = 32 * ((nirqs & 0x1f) + 1);
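/* GICD_TYPER[4:0] (ITLinesNumber): 32 * (N + 1) interrupt lines. */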
for (int i = 0; i < nirqs; i += 4)
gic_d_write_4(GICD_IPRIORITYR(i >> 2), 0);
/* Enable CPU interface */
gic_c_write_4(GICC_CTLR, 1);
/* Enable interrupt distribution */
gic_d_write_4(GICD_CTLR, 0x01);
/* Activate IRQ 29, i.e. the private timer IRQ */
gic_d_write_4(GICD_ISENABLER(29 >> 5), (1UL << (29 & 0x1F)));
}
static int
arm_gic_attach(device_t dev)
{
struct arm_gic_softc *sc = device_get_softc(dev);
int i;
uint32_t icciidr;
uint32_t nirqs;
if (arm_gic_sc)
return (ENXIO);
if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
device_printf(dev, "could not allocate resources\n");
return (ENXIO);
}
arm_post_filter = gic_post_filter;
/* Distributor Interface */
sc->gic_d_bst = rman_get_bustag(sc->gic_res[0]);
sc->gic_d_bsh = rman_get_bushandle(sc->gic_res[0]);
/* CPU Interface */
sc->gic_c_bst = rman_get_bustag(sc->gic_res[1]);
sc->gic_c_bsh = rman_get_bushandle(sc->gic_res[1]);
arm_gic_sc = sc;
/* Disable interrupt forwarding to the CPU interface */
gic_d_write_4(GICD_CTLR, 0x00);
/* Get the number of interrupts */
nirqs = gic_d_read_4(GICD_TYPER);
nirqs = 32 * ((nirqs & 0x1f) + 1);
icciidr = gic_c_read_4(GICC_IIDR);
device_printf(dev,"pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x nirqs %u\n",
icciidr>>20, (icciidr>>16) & 0xF, (icciidr>>12) & 0xf,
(icciidr & 0xfff), nirqs);
/* Set all global interrupts to be level triggered, active low. */
for (i = 32; i < nirqs; i += 32) {
gic_d_write_4(GICD_ICFGR(i >> 5), 0x00000000);
}
/* Disable all interrupts. */
for (i = 32; i < nirqs; i += 32) {
gic_d_write_4(GICD_ICENABLER(i >> 5), 0xFFFFFFFF);
}
for (i = 0; i < nirqs; i += 4) {
gic_d_write_4(GICD_IPRIORITYR(i >> 2), 0);
gic_d_write_4(GICD_ITARGETSR(i >> 2), 0xffffffff);
}
/* Enable CPU interface */
gic_c_write_4(GICC_CTLR, 1);
/* Enable interrupt distribution */
gic_d_write_4(GICD_CTLR, 0x01);
return (0);
}
static device_method_t arm_gic_methods[] = {
DEVMETHOD(device_probe, arm_gic_probe),
DEVMETHOD(device_attach, arm_gic_attach),
{ 0, 0 }
};
static driver_t arm_gic_driver = {
"gic",
arm_gic_methods,
sizeof(struct arm_gic_softc),
};
static devclass_t arm_gic_devclass;
DRIVER_MODULE(gic, simplebus, arm_gic_driver, arm_gic_devclass, 0, 0);
static void
gic_post_filter(void *arg)
{
uintptr_t irq = (uintptr_t) arg;
gic_c_write_4(GICC_EOIR, irq);
}
int
arm_get_next_irq(int last_irq)
{
uint32_t active_irq;
active_irq = gic_c_read_4(GICC_IAR);
/*
* Immediately EOIR the SGIs, because doing so requires the other
* bits (i.e. the CPU number), not just the IRQ number, and we do
* not have this information later.
*/
if ((active_irq & 0x3ff) < 16)
gic_c_write_4(GICC_EOIR, active_irq);
active_irq &= 0x3FF;
if (active_irq == 0x3FF) {
if (last_irq == -1)
printf("Spurious interrupt detected [0x%08x]\n", active_irq);
return -1;
}
gic_c_write_4(GICC_EOIR, active_irq);
return active_irq;
}
void
arm_mask_irq(uintptr_t nb)
{
gic_d_write_4(GICD_ICENABLER(nb >> 5), (1UL << (nb & 0x1F)));
}
void
arm_unmask_irq(uintptr_t nb)
{
gic_c_write_4(GICC_EOIR, nb);
gic_d_write_4(GICD_ISENABLER(nb >> 5), (1UL << (nb & 0x1F)));
}
#ifdef SMP
void
pic_ipi_send(cpuset_t cpus, u_int ipi)
{
uint32_t val = 0, i;
for (i = 0; i < MAXCPU; i++)
if (CPU_ISSET(i, &cpus))
val |= 1 << (16 + i);
gic_d_write_4(GICD_SGIR(0), val | ipi);
}
int
pic_ipi_get(int i)
{
if (i != -1) {
/*
* The intr code will automagically give the frame pointer
* if the interrupt argument is 0.
*/
if ((unsigned int)i > 16)
return (0);
return (i);
}
return (0x3ff);
}
void
pic_ipi_clear(int ipi)
{
}
#endif
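
pic_ipi_send() above builds the GICD_SGIR value directly: with TargetListFilter = 0 (bits [25:24] clear), the CPU target list goes in bits [23:16], one bit per core, and the SGI interrupt ID (0-15) in bits [3:0]. A sketch of the encoding (helper name hypothetical):

static uint32_t
sgir_encode(uint32_t targets, u_int ipi)
{
	/* targets: bitmask of destination CPUs, one bit per core (max 8). */
	return (((targets & 0xff) << 16) | (ipi & 0xf));
}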

View File

@ -236,6 +236,17 @@ const struct cpuidtab cpuids[] = {
{ CPU_ID_ARM1026EJS, CPU_CLASS_ARM10EJ, "ARM1026EJ-S",
generic_steppings },
{ CPU_ID_CORTEXA8R1, CPU_CLASS_CORTEXA, "Cortex A8-r1",
generic_steppings },
{ CPU_ID_CORTEXA8R2, CPU_CLASS_CORTEXA, "Cortex A8-r2",
generic_steppings },
{ CPU_ID_CORTEXA8R3, CPU_CLASS_CORTEXA, "Cortex A8-r3",
generic_steppings },
{ CPU_ID_CORTEXA9R1, CPU_CLASS_CORTEXA, "Cortex A9-r1",
generic_steppings },
{ CPU_ID_CORTEXA9R2, CPU_CLASS_CORTEXA, "Cortex A9-r2",
generic_steppings },
{ CPU_ID_SA110, CPU_CLASS_SA1, "SA-110",
sa110_steppings },
{ CPU_ID_SA1100, CPU_CLASS_SA1, "SA-1100",
@ -302,8 +313,17 @@ const struct cpuidtab cpuids[] = {
{ CPU_ID_MV88FR571_VD, CPU_CLASS_MARVELL, "Feroceon 88FR571-VD",
generic_steppings },
{ CPU_ID_MV88FR571_41, CPU_CLASS_MARVELL, "Early Feroceon 88FR571",
generic_steppings },
{ CPU_ID_MV88SV581X_V6, CPU_CLASS_MARVELL, "Sheeva 88SV581x",
generic_steppings },
{ CPU_ID_ARM_88SV581X_V6, CPU_CLASS_MARVELL, "Sheeva 88SV581x",
generic_steppings },
{ CPU_ID_MV88SV581X_V7, CPU_CLASS_MARVELL, "Sheeva 88SV581x",
generic_steppings },
{ CPU_ID_ARM_88SV581X_V7, CPU_CLASS_MARVELL, "Sheeva 88SV581x",
generic_steppings },
{ CPU_ID_MV88SV584X, CPU_CLASS_MARVELL, "Sheeva 88SV584x",
generic_steppings },
{ CPU_ID_ARM_88SV584X, CPU_CLASS_MARVELL, "Sheeva 88SV584x",
generic_steppings },
{ 0, CPU_CLASS_NONE, NULL, NULL }
@ -328,6 +348,7 @@ const struct cpu_classtab cpu_classes[] = {
{ "ARM9EJ-S", "CPU_ARM9E" }, /* CPU_CLASS_ARM9EJS */
{ "ARM10E", "CPU_ARM10" }, /* CPU_CLASS_ARM10E */
{ "ARM10EJ", "CPU_ARM10" }, /* CPU_CLASS_ARM10EJ */
{ "Cortex-A", "CPU_CORTEXA" }, /* CPU_CLASS_CORTEXA */
{ "SA-1", "CPU_SA110" }, /* CPU_CLASS_SA1 */
{ "XScale", "CPU_XSCALE_..." }, /* CPU_CLASS_XSCALE */
{ "ARM11J", "CPU_ARM11" }, /* CPU_CLASS_ARM11J */
@ -359,13 +380,81 @@ static const char * const wtnames[] = {
"**unknown 15**",
};
static void
print_enadis(int enadis, char *s)
{
printf(" %s %sabled", s, (enadis == 0) ? "dis" : "en");
}
extern int ctrl;
enum cpu_class cpu_class = CPU_CLASS_NONE;
u_int cpu_pfr(int num)
{
u_int feat;
switch (num) {
case 0:
__asm __volatile("mrc p15, 0, %0, c0, c1, 0"
: "=r" (feat));
break;
case 1:
__asm __volatile("mrc p15, 0, %0, c0, c1, 1"
: "=r" (feat));
break;
default:
panic("Processor Feature Register %d not implemented", num);
break;
}
return (feat);
}
static void
identify_armv7(void)
{
u_int feature;
printf("Supported features:");
/* Get Processor Feature Register 0 */
feature = cpu_pfr(0);
if (feature & ARM_PFR0_ARM_ISA_MASK)
printf(" ARM_ISA");
if (feature & ARM_PFR0_THUMB2)
printf(" THUMB2");
else if (feature & ARM_PFR0_THUMB)
printf(" THUMB");
if (feature & ARM_PFR0_JAZELLE_MASK)
printf(" JAZELLE");
if (feature & ARM_PFR0_THUMBEE_MASK)
printf(" THUMBEE");
/* Get Processor Feature Register 1 */
feature = cpu_pfr(1);
if (feature & ARM_PFR1_ARMV4_MASK)
printf(" ARMv4");
if (feature & ARM_PFR1_SEC_EXT_MASK)
printf(" Security_Ext");
if (feature & ARM_PFR1_MICROCTRL_MASK)
printf(" M_profile");
printf("\n");
}
void
identify_arm_cpu(void)
{
u_int cpuid;
u_int cpuid, reg, size, sets, ways;
u_int8_t type, linesize;
int i;
cpuid = cpu_id();
@ -389,74 +478,130 @@ identify_arm_cpu(void)
printf("unknown CPU (ID = 0x%x)\n", cpuid);
printf(" ");
switch (cpu_class) {
case CPU_CLASS_ARM6:
case CPU_CLASS_ARM7:
case CPU_CLASS_ARM7TDMI:
case CPU_CLASS_ARM8:
if ((ctrl & CPU_CONTROL_IDC_ENABLE) == 0)
printf(" IDC disabled");
else
printf(" IDC enabled");
break;
case CPU_CLASS_ARM9TDMI:
case CPU_CLASS_ARM9ES:
case CPU_CLASS_ARM9EJS:
case CPU_CLASS_ARM10E:
case CPU_CLASS_ARM10EJ:
case CPU_CLASS_SA1:
case CPU_CLASS_XSCALE:
case CPU_CLASS_ARM11J:
case CPU_CLASS_MARVELL:
if ((ctrl & CPU_CONTROL_DC_ENABLE) == 0)
printf(" DC disabled");
else
printf(" DC enabled");
if ((ctrl & CPU_CONTROL_IC_ENABLE) == 0)
printf(" IC disabled");
else
printf(" IC enabled");
#ifdef CPU_XSCALE_81342
if ((ctrl & CPU_CONTROL_L2_ENABLE) == 0)
printf(" L2 disabled");
else
printf(" L2 enabled");
#endif
break;
default:
break;
}
if ((ctrl & CPU_CONTROL_WBUF_ENABLE) == 0)
printf(" WB disabled");
else
printf(" WB enabled");
if ((cpuid & CPU_ID_ARCH_MASK) == CPU_ID_CPUID_SCHEME) {
identify_armv7();
} else {
if (ctrl & CPU_CONTROL_BEND_ENABLE)
printf(" Big-endian");
else
printf(" Little-endian");
switch (cpu_class) {
case CPU_CLASS_ARM6:
case CPU_CLASS_ARM7:
case CPU_CLASS_ARM7TDMI:
case CPU_CLASS_ARM8:
print_enadis(ctrl & CPU_CONTROL_IDC_ENABLE, "IDC");
break;
case CPU_CLASS_ARM9TDMI:
case CPU_CLASS_ARM9ES:
case CPU_CLASS_ARM9EJS:
case CPU_CLASS_ARM10E:
case CPU_CLASS_ARM10EJ:
case CPU_CLASS_SA1:
case CPU_CLASS_XSCALE:
case CPU_CLASS_ARM11J:
case CPU_CLASS_MARVELL:
print_enadis(ctrl & CPU_CONTROL_DC_ENABLE, "DC");
print_enadis(ctrl & CPU_CONTROL_IC_ENABLE, "IC");
#ifdef CPU_XSCALE_81342
print_enadis(ctrl & CPU_CONTROL_L2_ENABLE, "L2");
#endif
#if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY)
i = sheeva_control_ext(0, 0);
print_enadis(i & MV_WA_ENABLE, "WA");
print_enadis(i & MV_DC_STREAM_ENABLE, "DC streaming");
printf("\n ");
print_enadis((i & MV_BTB_DISABLE) == 0, "BTB");
print_enadis(i & MV_L2_ENABLE, "L2");
print_enadis((i & MV_L2_PREFETCH_DISABLE) == 0,
"L2 prefetch");
printf("\n ");
#endif
break;
default:
break;
}
}
print_enadis(ctrl & CPU_CONTROL_WBUF_ENABLE, "WB");
if (ctrl & CPU_CONTROL_LABT_ENABLE)
printf(" LABT");
else
printf(" EABT");
if (ctrl & CPU_CONTROL_BPRD_ENABLE)
printf(" branch prediction enabled");
print_enadis(ctrl & CPU_CONTROL_BPRD_ENABLE, "branch prediction");
printf("\n");
/* Print cache info. */
if (arm_picache_line_size == 0 && arm_pdcache_line_size == 0)
return;
if (arm_pcache_unified) {
printf(" %dKB/%dB %d-way %s unified cache\n",
arm_pdcache_size / 1024,
arm_pdcache_line_size, arm_pdcache_ways,
wtnames[arm_pcache_type]);
if (arm_cache_level) {
printf("LoUU:%d LoC:%d LoUIS:%d \n", CPU_CLIDR_LOUU(arm_cache_level) + 1,
arm_cache_loc, CPU_CLIDR_LOUIS(arm_cache_level) + 1);
i = 0;
while (((type = CPU_CLIDR_CTYPE(arm_cache_level, i)) != 0) && i < 7) {
printf("Cache level %d: \n", i + 1);
if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
type == CACHE_SEP_CACHE) {
reg = arm_cache_type[2 * i];
ways = CPUV7_CT_xSIZE_ASSOC(reg) + 1;
sets = CPUV7_CT_xSIZE_SET(reg) + 1;
linesize = 1 << (CPUV7_CT_xSIZE_LEN(reg) + 4);
size = (ways * sets * linesize) / 1024;
if (type == CACHE_UNI_CACHE)
printf(" %dKB/%dB %d-way unified cache", size, linesize,ways);
else
printf(" %dKB/%dB %d-way data cache", size, linesize, ways);
if (reg & CPUV7_CT_CTYPE_WT)
printf(" WT");
if (reg & CPUV7_CT_CTYPE_WB)
printf(" WB");
if (reg & CPUV7_CT_CTYPE_RA)
printf(" Read-Alloc");
if (reg & CPUV7_CT_CTYPE_WA)
printf(" Write-Alloc");
printf("\n");
}
if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
reg = arm_cache_type[(2 * i) + 1];
ways = CPUV7_CT_xSIZE_ASSOC(reg) + 1;
sets = CPUV7_CT_xSIZE_SET(reg) + 1;
linesize = 1 << (CPUV7_CT_xSIZE_LEN(reg) + 4);
size = (ways * sets * linesize) / 1024;
printf(" %dKB/%dB %d-way instruction cache", size, linesize, ways);
if (reg & CPUV7_CT_CTYPE_WT)
printf(" WT");
if (reg & CPUV7_CT_CTYPE_WB)
printf(" WB");
if (reg & CPUV7_CT_CTYPE_RA)
printf(" Read-Alloc");
if (reg & CPUV7_CT_CTYPE_WA)
printf(" Write-Alloc");
printf("\n");
}
i++;
}
} else {
printf(" %dKB/%dB %d-way Instruction cache\n",
arm_picache_size / 1024,
arm_picache_line_size, arm_picache_ways);
printf(" %dKB/%dB %d-way %s Data cache\n",
arm_pdcache_size / 1024,
arm_pdcache_line_size, arm_pdcache_ways,
wtnames[arm_pcache_type]);
/* Print cache info. */
if (arm_picache_line_size == 0 && arm_pdcache_line_size == 0)
return;
if (arm_pcache_unified) {
printf(" %dKB/%dB %d-way %s unified cache\n",
arm_pdcache_size / 1024,
arm_pdcache_line_size, arm_pdcache_ways,
wtnames[arm_pcache_type]);
} else {
printf(" %dKB/%dB %d-way instruction cache\n",
arm_picache_size / 1024,
arm_picache_line_size, arm_picache_ways);
printf(" %dKB/%dB %d-way %s data cache\n",
arm_pdcache_size / 1024,
arm_pdcache_line_size, arm_pdcache_ways,
wtnames[arm_pcache_type]);
}
}
}
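/*
 * A worked decode of one cache level, assuming the CPUV7_CT_xSIZE_* macros
 * return the raw CCSIDR fields as the loop above implies; the sample field
 * values (ASSOC=3, SET=127, LEN=1) are hypothetical.
 */
#include <stdio.h>

int
main(void)
{
	unsigned ways = 3 + 1;			/* ASSOC + 1 = 4 ways */
	unsigned sets = 127 + 1;		/* SET + 1 = 128 sets */
	unsigned linesize = 1 << (1 + 4);	/* 1 << (LEN + 4) = 32 B */

	/* 4 ways * 128 sets * 32 B = 16 KB */
	printf("%uKB/%uB %u-way\n", (ways * sets * linesize) / 1024,
	    linesize, ways);
	return (0);
}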

View File

@ -1,6 +1,7 @@
/* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */
/*-
* Copyright 2011 Semihalf
* Copyright (C) 1994-1997 Mark Brinicombe
* Copyright (C) 1994 Brini
* All rights reserved.
@ -41,7 +42,7 @@
__FBSDID("$FreeBSD$");
/* What size should this really be ? It is only used by initarm() */
#define INIT_ARM_STACK_SIZE 2048
#define INIT_ARM_STACK_SIZE (2048 * 4)
#define CPWAIT_BRANCH \
sub pc, pc, #4
@ -161,15 +162,26 @@ Lunmapped:
orrne r5, r5, #PHYSADDR
movne pc, r5
#if defined(SMP)
orr r0, r0, #2 /* Set TTB shared memory flag */
#endif
mcr p15, 0, r0, c2, c0, 0 /* Set TTB */
mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */
#if defined(CPU_ARM11) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B)
mov r0, #0
mcr p15, 0, r0, c13, c0, 1 /* Set ASID to 0 */
#endif
/* Set the Domain Access register. Very important! */
mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
mcr p15, 0, r0, c3, c0, 0
/* Enable MMU */
mrc p15, 0, r0, c1, c0, 0
orr r0, r0, #CPU_CONTROL_MMU_ENABLE
#if defined(CPU_ARM11) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B)
orr r0, r0, #CPU_CONTROL_V6_EXTPAGE
#endif
orr r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
mcr p15, 0, r0, c1, c0, 0
nop
nop
@ -225,13 +237,23 @@ Lend:
.word _edata
Lstartup_pagetable:
.word STARTUP_PAGETABLE_ADDR
#ifdef SMP
Lstartup_pagetable_secondary:
.word temp_pagetable
#endif
mmu_init_table:
/* fill all table VA==PA */
/* map SDRAM VA==PA, WT cacheable */
#if !defined(SMP)
MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
/* map VA 0xc0000000..0xc3ffffff to PA */
MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#else
MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
/* map VA 0xc0000000..0xc3ffffff to PA */
MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
MMU_INIT(0x48000000, 0x48000000, 1, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
#endif
.word 0 /* end of table */
#endif
.Lstart:
@ -241,6 +263,11 @@ mmu_init_table:
.Lvirt_done:
.word virt_done
#if defined(SMP)
.Lmpvirt_done:
.word mpvirt_done
#endif
.Lmainreturned:
.asciz "main() returned"
.align 0
@ -255,6 +282,133 @@ svcstk:
.Lcpufuncs:
.word _C_LABEL(cpufuncs)
#if defined(SMP)
Lsramaddr:
.word 0xffff0080
#if 0
#define AP_DEBUG(tmp) \
mrc p15, 0, r1, c0, c0, 5; \
ldr r0, Lsramaddr; \
add r0, r1, lsl #2; \
mov r1, tmp; \
str r1, [r0], #0x0000;
#else
#define AP_DEBUG(tmp)
#endif
ASENTRY_NP(mptramp)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0
AP_DEBUG(#1)
mrs r3, cpsr_all
bic r3, r3, #(PSR_MODE)
orr r3, r3, #(PSR_SVC32_MODE)
msr cpsr_all, r3
mrc p15, 0, r0, c0, c0, 5
and r0, #0x0f /* Get CPU ID */
/* Read boot address for CPU */
mov r1, #0x100
mul r2, r0, r1
ldr r1, Lpmureg
add r0, r2, r1
ldr r1, [r0], #0x00
mov pc, r1
Lpmureg:
.word 0xd0022124
ASENTRY_NP(mpentry)
AP_DEBUG(#2)
/* Make sure interrupts are disabled. */
mrs r7, cpsr
orr r7, r7, #(I32_bit|F32_bit)
msr cpsr_c, r7
adr r7, Ltag
bic r7, r7, #0xf0000000
orr r7, r7, #PHYSADDR
/* Disable MMU for a while */
mrc p15, 0, r2, c1, c0, 0
bic r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
CPU_CONTROL_WBUF_ENABLE)
bic r2, r2, #(CPU_CONTROL_IC_ENABLE)
bic r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
mcr p15, 0, r2, c1, c0, 0
nop
nop
nop
AP_DEBUG(#3)
Ltag:
ldr r0, Lstartup_pagetable_secondary
bic r0, r0, #0xf0000000
orr r0, r0, #PHYSADDR
ldr r0, [r0]
#if defined(SMP)
orr r0, r0, #0 /* Set TTB shared memory flag */
#endif
mcr p15, 0, r0, c2, c0, 0 /* Set TTB */
mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */
#if defined(CPU_ARM11) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
mov r0, #0
mcr p15, 0, r0, c13, c0, 1 /* Set ASID to 0 */
#endif
AP_DEBUG(#4)
/* Set the Domain Access register. Very important! */
mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
mcr p15, 0, r0, c3, c0, 0
/* Enable MMU */
mrc p15, 0, r0, c1, c0, 0
#if defined(CPU_ARM11) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
orr r0, r0, #CPU_CONTROL_V6_EXTPAGE
#endif
orr r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
mcr p15, 0, r0, c1, c0, 0
nop
nop
nop
CPWAIT(r0)
adr r1, .Lstart
ldmia r1, {r1, r2, sp} /* Set initial stack and */
mrc p15, 0, r0, c0, c0, 5
and r0, r0, #15
mov r1, #2048
mul r2, r1, r0
sub sp, sp, r2
str r1, [sp]
ldr pc, .Lmpvirt_done
mpvirt_done:
mov fp, #0 /* trace back starts here */
bl _C_LABEL(init_secondary) /* Off we go */
adr r0, .Lmpreturned
b _C_LABEL(panic)
/* NOTREACHED */
.Lmpreturned:
.asciz "init_secondary() returned"
.align 0
#endif
ENTRY_NP(cpu_halt)
mrs r2, cpsr
bic r2, r2, #(PSR_MODE)

View File

@ -44,6 +44,7 @@
#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_timer.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@ -93,8 +94,10 @@ __FBSDID("$FreeBSD$");
#include <machine/vmparam.h>
#include <machine/sysarch.h>
static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];
static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;
@ -278,8 +281,10 @@ static void
cpu_startup(void *dummy)
{
struct pcb *pcb = thread0.td_pcb;
#ifdef ARM_TP_ADDRESS
#ifndef ARM_CACHE_LOCK_ENABLE
vm_page_t m;
#endif
#endif
cpu_setup("");
@ -322,6 +327,7 @@ cpu_startup(void *dummy)
vector_page_setprot(VM_PROT_READ);
pmap_set_pcb_pagedir(pmap_kernel(), pcb);
pmap_postinit();
#ifdef ARM_TP_ADDRESS
#ifdef ARM_CACHE_LOCK_ENABLE
pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
arm_lock_cache_line(ARM_TP_ADDRESS);
@ -331,6 +337,7 @@ cpu_startup(void *dummy)
#endif
*(uint32_t *)ARM_RAS_START = 0;
*(uint32_t *)ARM_RAS_END = 0xffffffff;
#endif
}
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
@ -358,7 +365,20 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
void
cpu_idle(int busy)
{
#ifndef NO_EVENTTIMERS
if (!busy) {
critical_enter();
cpu_idleclock();
}
#endif
cpu_sleep(0);
#ifndef NO_EVENTTIMERS
if (!busy) {
cpu_activeclock();
critical_exit();
}
#endif
}
int
@ -768,6 +788,19 @@ fake_preload_metadata(struct arm_boot_params *abp __unused)
return (lastaddr);
}
void
pcpu0_init(void)
{
#if ARM_ARCH_7A || defined(CPU_MV_PJ4B)
set_pcpu(pcpup);
#endif
pcpu_init(pcpup, 0, sizeof(struct pcpu));
PCPU_SET(curthread, &thread0);
#ifdef ARM_VFP_SUPPORT
PCPU_SET(cpu, 0);
#endif
}
#if defined(LINUX_BOOT_ABI)
vm_offset_t
linux_parse_boot_param(struct arm_boot_params *abp)

sys/arm/arm/mp_machdep.c Normal file
View File

@ -0,0 +1,393 @@
/*-
* Copyright (c) 2011 Semihalf.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <machine/cpu.h>
#include <machine/smp.h>
#include <machine/pcb.h>
#include <machine/pte.h>
#include <machine/intr.h>
#include <machine/vmparam.h>
#include "opt_smp.h"
void *temp_pagetable;
extern struct pcpu __pcpu[];
/* used to hold the APs until we are ready to release them */
struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];
/* # of Application processors */
volatile int mp_naps;
/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;
static int ipi_handler(void *arg);
void set_stackptrs(int cpu);
/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];
/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{
CPU_SETOF(0, &all_cpus);
return (platform_mp_probe());
}
/* Start Application Processor via platform specific function */
static int
check_ap(void)
{
uint32_t ms;
for (ms = 0; ms < 2000; ++ms) {
if ((mp_naps + 1) == mp_ncpus)
return (0); /* success */
else
DELAY(1000);
}
return (-2);
}
extern unsigned char _end[];
/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
int error, i;
vm_offset_t temp_pagetable_va;
vm_paddr_t addr, addr_end;
mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
/* Reserve memory for application processors */
for(i = 0; i < (mp_ncpus - 1); i++)
dpcpu[i] = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
temp_pagetable_va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE,
M_TEMP, 0, 0x0, 0xffffffff, L1_TABLE_SIZE, 0);
addr = KERNPHYSADDR;
addr_end = (vm_offset_t)&_end - KERNVIRTADDR + KERNPHYSADDR;
addr_end &= ~L1_S_OFFSET;
addr_end += L1_S_SIZE;
bzero((void *)temp_pagetable_va, L1_TABLE_SIZE);
for (addr = KERNPHYSADDR; addr <= addr_end; addr += L1_S_SIZE) {
((int *)(temp_pagetable_va))[addr >> L1_S_SHIFT] =
L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW)|L1_S_DOM(PMAP_DOMAIN_KERNEL)|addr;
((int *)(temp_pagetable_va))[(addr -
KERNPHYSADDR + KERNVIRTADDR) >> L1_S_SHIFT] =
L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW)|L1_S_DOM(PMAP_DOMAIN_KERNEL)|addr;
}
temp_pagetable = (void*)(vtophys(temp_pagetable_va));
cpu_idcache_wbinv_all();
cpu_l2cache_wbinv_all();
/* Initialize boot code and start up processors */
platform_mp_start_ap();
/* Check if the APs started properly */
error = check_ap();
if (error)
printf("WARNING: Some AP's failed to start\n");
else
for (i = 1; i < mp_ncpus; i++)
CPU_SET(i, &all_cpus);
contigfree((void *)temp_pagetable_va, L1_TABLE_SIZE, M_TEMP);
}
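/*
 * A single-entry sketch of the loop above: each iteration writes one 1MB
 * L1 section descriptor, indexed by the address shifted by L1_S_SHIFT.
 * The helper name is hypothetical; the flag values mirror cpu_mp_start().
 */
static void
map_section(uint32_t *l1, vm_offset_t va, vm_paddr_t pa)
{
	l1[va >> L1_S_SHIFT] = L1_TYPE_S | L1_SHARED | L1_S_C |
	    L1_S_AP(AP_KRW) | L1_S_DOM(PMAP_DOMAIN_KERNEL) | pa;
}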
/* Introduce rest of cores to the world */
void
cpu_mp_announce(void)
{
}
extern vm_paddr_t pmap_pa;
void
init_secondary(int cpu)
{
struct pcpu *pc;
uint32_t loop_counter;
int start = 0, end = 0;
cpu_setup(NULL);
setttb(pmap_pa);
cpu_tlb_flushID();
pc = &__pcpu[cpu];
set_pcpu(pc);
pcpu_init(pc, cpu, sizeof(struct pcpu));
dpcpu_init(dpcpu[cpu - 1], cpu);
/* Provide stack pointers for other processor modes. */
set_stackptrs(cpu);
/* Signal our startup to BSP */
atomic_add_rel_32(&mp_naps, 1);
/* Spin until the BSP releases the APs */
while (!aps_ready)
;
/* Initialize curthread */
KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
pc->pc_curthread = pc->pc_idlethread;
pc->pc_curpcb = pc->pc_idlethread->td_pcb;
mtx_lock_spin(&ap_boot_mtx);
atomic_add_rel_32(&smp_cpus, 1);
if (smp_cpus == mp_ncpus) {
/* enable IPI's, tlb shootdown, freezes etc */
atomic_store_rel_int(&smp_started, 1);
smp_active = 1;
}
mtx_unlock_spin(&ap_boot_mtx);
/* Enable ipi */
#ifdef IPI_IRQ_START
start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
end = IPI_IRQ_END;
#else
end = IPI_IRQ_START;
#endif
#endif
for (int i = start; i <= end; i++)
arm_unmask_irq(i);
enable_interrupts(I32_bit);
loop_counter = 0;
while (smp_started == 0) {
DELAY(100);
loop_counter++;
if (loop_counter == 1000)
CTR0(KTR_SMP, "AP still wait for smp_started");
}
/* Start per-CPU event timers. */
cpu_initclocks_ap();
CTR0(KTR_SMP, "go into scheduler");
platform_mp_init_secondary();
/* Enter the scheduler */
sched_throw(NULL);
panic("scheduler returned us to %s", __func__);
/* NOTREACHED */
}
static int
ipi_handler(void *arg)
{
u_int cpu, ipi;
cpu = PCPU_GET(cpuid);
ipi = pic_ipi_get((int)arg);
while ((ipi != 0x3ff)) {
switch (ipi) {
case IPI_RENDEZVOUS:
CTR0(KTR_SMP, "IPI_RENDEZVOUS");
smp_rendezvous_action();
break;
case IPI_AST:
CTR0(KTR_SMP, "IPI_AST");
break;
case IPI_STOP:
case IPI_STOP_HARD:
/*
* IPI_STOP_HARD is mapped to IPI_STOP so it is not
* necessary to add it in the switch.
*/
CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");
savectx(&stoppcbs[cpu]);
/* Indicate we are stopped */
CPU_SET_ATOMIC(cpu, &stopped_cpus);
/* Wait for restart */
while (!CPU_ISSET(cpu, &started_cpus))
cpu_spinwait();
CPU_CLR_ATOMIC(cpu, &started_cpus);
CPU_CLR_ATOMIC(cpu, &stopped_cpus);
CTR0(KTR_SMP, "IPI_STOP (restart)");
break;
case IPI_PREEMPT:
CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
sched_preempt(curthread);
break;
case IPI_HARDCLOCK:
CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
hardclockintr();
break;
case IPI_TLB:
CTR1(KTR_SMP, "%s: IPI_TLB", __func__);
cpufuncs.cf_tlb_flushID();
break;
default:
panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
}
pic_ipi_clear(ipi);
ipi = pic_ipi_get(-1);
}
return (FILTER_HANDLED);
}
static void
release_aps(void *dummy __unused)
{
uint32_t loop_counter;
int start = 0, end = 0;
if (mp_ncpus == 1)
return;
#ifdef IPI_IRQ_START
start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
end = IPI_IRQ_END;
#else
end = IPI_IRQ_START;
#endif
#endif
for (int i = start; i <= end; i++) {
/*
 * Set up the IPI handler. Use 0xdeadbeef as the argument
 * value for irq 0; if we used 0, the intr code would give
 * the trap frame pointer instead.
 */
arm_setup_irqhandler("ipi", ipi_handler, NULL, (void *)i, i,
INTR_TYPE_MISC | INTR_EXCL, NULL);
/* Enable ipi */
arm_unmask_irq(i);
}
atomic_store_rel_int(&aps_ready, 1);
printf("Release APs\n");
for (loop_counter = 0; loop_counter < 2000; loop_counter++) {
if (smp_started)
return;
DELAY(1000);
}
printf("AP's not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
struct cpu_group *
cpu_topo(void)
{
return (smp_topo_1level(CG_SHARE_L2, 1, 0));
}
void
cpu_mp_setmaxid(void)
{
platform_mp_setmaxid();
}
/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
cpuset_t other_cpus;
other_cpus = all_cpus;
CPU_CLR(PCPU_GET(cpuid), &other_cpus);
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
platform_ipi_send(other_cpus, ipi);
}
void
ipi_cpu(int cpu, u_int ipi)
{
cpuset_t cpus;
CPU_ZERO(&cpus);
CPU_SET(cpu, &cpus);
CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
platform_ipi_send(cpus, ipi);
}
void
ipi_selected(cpuset_t cpus, u_int ipi)
{
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
platform_ipi_send(cpus, ipi);
}
void
tlb_broadcast(int ipi)
{
if (smp_started)
ipi_all_but_self(ipi);
}
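/*
 * All three wrappers above funnel into platform_ipi_send(); hypothetical
 * call sites for illustration only, not part of this file:
 */
static void
ipi_examples(void)
{
	ipi_cpu(1, IPI_PREEMPT);		/* preempt CPU 1 only */
	ipi_all_but_self(IPI_RENDEZVOUS);	/* every CPU but ourselves */
}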

sys/arm/arm/mpcore_timer.c Normal file
View File

@ -0,0 +1,431 @@
/*-
* Copyright (c) 2011 The FreeBSD Foundation
* All rights reserved.
*
* Developed by Ben Gray <ben.r.gray@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/**
* The ARM Cortex-A9 core can support a global timer plus a private and
* watchdog timer per core. This driver reserves memory and interrupt
* resources for accessing both timer register sets; these resources are
* stored globally and used to set up the timecount and eventtimer.
*
* The timecount timer uses the global 64-bit counter, whereas the
* per-CPU eventtimer uses the private 32-bit counters.
*
*
* REF: ARM Cortex-A9 MPCore, Technical Reference Manual (rev. r2p2)
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
#include <sys/watchdog.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/intr.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <machine/bus.h>
#include <machine/fdt.h>
/* Private (per-CPU) timer register map */
#define PRV_TIMER_LOAD 0x0000
#define PRV_TIMER_COUNT 0x0004
#define PRV_TIMER_CTRL 0x0008
#define PRV_TIMER_INTR 0x000C
#define PRV_TIMER_CTR_PRESCALER_SHIFT 8
#define PRV_TIMER_CTRL_IRQ_ENABLE (1UL << 2)
#define PRV_TIMER_CTRL_AUTO_RELOAD (1UL << 1)
#define PRV_TIMER_CTRL_TIMER_ENABLE (1UL << 0)
#define PRV_TIMER_INTR_EVENT (1UL << 0)
/* Global timer register map */
#define GBL_TIMER_COUNT_LOW 0x0000
#define GBL_TIMER_COUNT_HIGH 0x0004
#define GBL_TIMER_CTRL 0x0008
#define GBL_TIMER_INTR 0x000C
#define GBL_TIMER_CTR_PRESCALER_SHIFT 8
#define GBL_TIMER_CTRL_AUTO_INC (1UL << 3)
#define GBL_TIMER_CTRL_IRQ_ENABLE (1UL << 2)
#define GBL_TIMER_CTRL_COMP_ENABLE (1UL << 1)
#define GBL_TIMER_CTRL_TIMER_ENABLE (1UL << 0)
#define GBL_TIMER_INTR_EVENT (1UL << 0)
struct arm_tmr_softc {
struct resource * tmr_res[4];
bus_space_tag_t prv_bst;
bus_space_tag_t gbl_bst;
bus_space_handle_t prv_bsh;
bus_space_handle_t gbl_bsh;
uint32_t clkfreq;
struct eventtimer et;
};
static struct resource_spec arm_tmr_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Global registers */
{ SYS_RES_IRQ, 0, RF_ACTIVE }, /* Global timer interrupt (unused) */
{ SYS_RES_MEMORY, 1, RF_ACTIVE }, /* Private (per-CPU) registers */
{ SYS_RES_IRQ, 1, RF_ACTIVE }, /* Private timer interrupt */
{ -1, 0 }
};
static struct arm_tmr_softc *arm_tmr_sc = NULL;
#define tmr_prv_read_4(reg) \
bus_space_read_4(arm_tmr_sc->prv_bst, arm_tmr_sc->prv_bsh, reg)
#define tmr_prv_write_4(reg, val) \
bus_space_write_4(arm_tmr_sc->prv_bst, arm_tmr_sc->prv_bsh, reg, val)
#define tmr_gbl_read_4(reg) \
bus_space_read_4(arm_tmr_sc->gbl_bst, arm_tmr_sc->gbl_bsh, reg)
#define tmr_gbl_write_4(reg, val) \
bus_space_write_4(arm_tmr_sc->gbl_bst, arm_tmr_sc->gbl_bsh, reg, val)
static timecounter_get_t arm_tmr_get_timecount;
static struct timecounter arm_tmr_timecount = {
.tc_name = "ARM MPCore Timecounter",
.tc_get_timecount = arm_tmr_get_timecount,
.tc_poll_pps = NULL,
.tc_counter_mask = ~0u,
.tc_frequency = 0,
.tc_quality = 1000,
};
/**
* arm_tmr_get_timecount - reads the timecount (global) timer
* @tc: pointer to arm_tmr_timecount struct
*
* We only read the lower 32 bits; the timecount code only uses 32 bits,
* so (for now?) ignore the upper 32 bits.
*
* RETURNS
* The lower 32-bits of the counter.
*/
static unsigned
arm_tmr_get_timecount(struct timecounter *tc)
{
return (tmr_gbl_read_4(GBL_TIMER_COUNT_LOW));
}
/**
* arm_tmr_start - starts the eventtimer (private) timer
* @et: pointer to eventtimer struct
* @first: the number of seconds and fractional seconds to trigger in
* @period: the period (in seconds and fractional seconds) to set
*
* If the eventtimer is required to be in oneshot mode, period will be
* NULL and first will point to the time to trigger. If in periodic mode
* period will contain the time period and first may optionally contain
* the time for the first period.
*
* RETURNS
* Always returns 0
*/
static int
arm_tmr_start(struct eventtimer *et, struct bintime *first,
struct bintime *period)
{
struct arm_tmr_softc *sc = (struct arm_tmr_softc *)et->et_priv;
uint32_t load, count;
uint32_t ctrl;
ctrl = PRV_TIMER_CTRL_IRQ_ENABLE | PRV_TIMER_CTRL_TIMER_ENABLE;
if (period != NULL) {
load = (et->et_frequency * (period->frac >> 32)) >> 32;
if (period->sec > 0)
load += et->et_frequency * period->sec;
ctrl |= PRV_TIMER_CTRL_AUTO_RELOAD;
} else {
load = 0;
}
if (first != NULL) {
count = (sc->et.et_frequency * (first->frac >> 32)) >> 32;
if (first->sec != 0)
count += sc->et.et_frequency * first->sec;
} else {
count = load;
}
tmr_prv_write_4(PRV_TIMER_LOAD, load);
tmr_prv_write_4(PRV_TIMER_COUNT, count);
tmr_prv_write_4(PRV_TIMER_CTRL, ctrl);
return (0);
}
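/*
 * The load/count math above is fixed-point: bintime.frac is a 64-bit
 * binary fraction of a second, so (freq * (frac >> 32)) >> 32
 * approximates freq * frac / 2^64. A worked example for a 10 ms period
 * with an assumed 500 MHz timer clock:
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t freq = 500000000;		/* assumed clock, Hz */
	uint64_t frac = UINT64_MAX / 100;	/* ~0.01 s as a 2^64 fraction */
	uint32_t load = (freq * (frac >> 32)) >> 32;

	printf("load = %u ticks\n", load);	/* ~5000000 = 10 ms */
	return (0);
}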
/**
* arm_tmr_stop - stops the eventtimer (private) timer
* @et: pointer to eventtimer struct
*
* Simply stops the private timer by clearing all bits in the ctrl register.
*
* RETURNS
* Always returns 0
*/
static int
arm_tmr_stop(struct eventtimer *et)
{
tmr_prv_write_4(PRV_TIMER_CTRL, 0);
return (0);
}
/**
* arm_tmr_intr - ISR for the eventtimer (private) timer
* @arg: pointer to arm_tmr_softc struct
*
* Clears the event register and then calls the eventtimer callback.
*
* RETURNS
* Always returns FILTER_HANDLED
*/
static int
arm_tmr_intr(void *arg)
{
struct arm_tmr_softc *sc = (struct arm_tmr_softc *)arg;
tmr_prv_write_4(PRV_TIMER_INTR, PRV_TIMER_INTR_EVENT);
if (sc->et.et_active)
sc->et.et_event_cb(&sc->et, sc->et.et_arg);
return (FILTER_HANDLED);
}
/**
* arm_tmr_probe - timer probe routine
* @dev: new device
*
* The probe function returns success when probed with the fdt compatible
* string set to "arm,mpcore-timers".
*
* RETURNS
* BUS_PROBE_DEFAULT if the fdt device is compatible, otherwise ENXIO.
*/
static int
arm_tmr_probe(device_t dev)
{
if (!ofw_bus_is_compatible(dev, "arm,mpcore-timers"))
return (ENXIO);
device_set_desc(dev, "ARM Generic MPCore Timers");
return (BUS_PROBE_DEFAULT);
}
/**
* arm_tmr_attach - attaches the timer to the simplebus
* @dev: new device
*
* Reserves memory and interrupt resources, stores the softc structure
* globally and registers both the timecount and eventtimer objects.
*
* RETURNS
* Zero on success or ENXIO if an error occurred.
*/
static int
arm_tmr_attach(device_t dev)
{
struct arm_tmr_softc *sc = device_get_softc(dev);
phandle_t node;
pcell_t clock;
void *ihl;
if (arm_tmr_sc)
return (ENXIO);
/* Get the base clock frequency */
node = ofw_bus_get_node(dev);
if ((OF_getprop(node, "clock-frequency", &clock, sizeof(clock))) <= 0) {
device_printf(dev, "missing clock-frequency attribute in FDT\n");
return (ENXIO);
}
sc->clkfreq = fdt32_to_cpu(clock);
if (bus_alloc_resources(dev, arm_tmr_spec, sc->tmr_res)) {
device_printf(dev, "could not allocate resources\n");
return (ENXIO);
}
/* Global timer interface */
sc->gbl_bst = rman_get_bustag(sc->tmr_res[0]);
sc->gbl_bsh = rman_get_bushandle(sc->tmr_res[0]);
/* Private per-CPU timer interface */
sc->prv_bst = rman_get_bustag(sc->tmr_res[2]);
sc->prv_bsh = rman_get_bushandle(sc->tmr_res[2]);
arm_tmr_sc = sc;
/* Disable both timers to start off */
tmr_prv_write_4(PRV_TIMER_CTRL, 0x00000000);
tmr_gbl_write_4(GBL_TIMER_CTRL, 0x00000000);
/* Setup and enable the global timer to use as the timecounter */
tmr_gbl_write_4(GBL_TIMER_CTRL, (0x00 << GBL_TIMER_CTR_PRESCALER_SHIFT) |
GBL_TIMER_CTRL_TIMER_ENABLE);
arm_tmr_timecount.tc_frequency = sc->clkfreq;
tc_init(&arm_tmr_timecount);
/* Setup and enable the timer */
if (bus_setup_intr(dev, sc->tmr_res[3], INTR_TYPE_CLK, arm_tmr_intr,
NULL, sc, &ihl) != 0) {
bus_release_resources(dev, arm_tmr_spec, sc->tmr_res);
device_printf(dev, "Unable to setup the clock irq handler.\n");
return (ENXIO);
}
sc->et.et_name = "ARM MPCore Eventtimer";
sc->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
sc->et.et_quality = 1000;
sc->et.et_frequency = sc->clkfreq;
sc->et.et_min_period.sec = 0;
sc->et.et_min_period.frac =
((0x00000002LLU << 32) / sc->et.et_frequency) << 32;
sc->et.et_max_period.sec = 0xfffffff0U / sc->et.et_frequency;
sc->et.et_max_period.frac =
((0xfffffffeLLU << 32) / sc->et.et_frequency) << 32;
sc->et.et_start = arm_tmr_start;
sc->et.et_stop = arm_tmr_stop;
sc->et.et_priv = sc;
et_register(&sc->et);
return (0);
}
static device_method_t arm_tmr_methods[] = {
DEVMETHOD(device_probe, arm_tmr_probe),
DEVMETHOD(device_attach, arm_tmr_attach),
{ 0, 0 }
};
static driver_t arm_tmr_driver = {
"mp_tmr",
arm_tmr_methods,
sizeof(struct arm_tmr_softc),
};
static devclass_t arm_tmr_devclass;
DRIVER_MODULE(mp_tmr, simplebus, arm_tmr_driver, arm_tmr_devclass, 0, 0);
/**
* cpu_initclocks - called by system to initialise the cpu clocks
*
* This is a boilerplate function; most of the setup has already been done
* when the driver was attached. Therefore this function must only be called
* after the driver is attached.
*
* RETURNS
* nothing
*/
void
cpu_initclocks(void)
{
if (PCPU_GET(cpuid) == 0)
cpu_initclocks_bsp();
else
cpu_initclocks_ap();
}
/**
* DELAY - Delay for at least usec microseconds.
* @usec: number of microseconds to delay by
*
* This function is called all over the kernel and is supposed to provide a
* consistent delay. This function may also be called before the console
* is set up, so no printfs can be called here.
*
* RETURNS:
* nothing
*/
void
DELAY(int usec)
{
int32_t counts_per_usec;
int32_t counts;
uint32_t first, last;
/* Check the timers are set up; if not, just use a for loop in the meantime */
if (arm_tmr_sc == NULL) {
for (; usec > 0; usec--)
for (counts = 200; counts > 0; counts--)
cpufunc_nullop(); /* Prevent gcc from optimizing
* out the loop
*/
return;
}
/* Get the number of times to count */
counts_per_usec = ((arm_tmr_timecount.tc_frequency / 1000000) + 1);
/*
* Clamp the timeout at a maximum value (about 32 seconds with
* a 66MHz clock). *Nobody* should be delay()ing for anywhere
* near that length of time and if they are, they should be hung
* out to dry.
*/
if (usec >= (0x80000000U / counts_per_usec))
counts = (0x80000000U / counts_per_usec) - 1;
else
counts = usec * counts_per_usec;
first = tmr_gbl_read_4(GBL_TIMER_COUNT_LOW);
while (counts > 0) {
last = tmr_gbl_read_4(GBL_TIMER_COUNT_LOW);
counts -= (int32_t)(last - first);
first = last;
}
}
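/*
 * The delta computation above survives 32-bit counter wrap because the
 * unsigned subtraction is reduced mod 2^32 before the cast to int32_t;
 * a small standalone demonstration:
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t first = 0xfffffff0u;	/* sampled just before wrap */
	uint32_t last = 0x00000010u;	/* sampled just after wrap */

	printf("delta = %d\n", (int32_t)(last - first));	/* 32 */
	return (0);
}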

sys/arm/arm/pl310.c Normal file
View File

@ -0,0 +1,321 @@
/*-
* Copyright (c) 2012 Olivier Houchard <cognet@FreeBSD.org>
* Copyright (c) 2011
* Ben Gray <ben.r.gray@gmail.com>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BEN GRAY ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL BEN GRAY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/intr.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pl310.h>
#include <machine/bus.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
/**
* PL310 - L2 Cache Controller register offsets.
*
*/
#define PL310_CACHE_ID 0x000
#define PL310_CACHE_TYPE 0x004
#define PL310_CTRL 0x100
#define PL310_AUX_CTRL 0x104
#define PL310_EVENT_COUNTER_CTRL 0x200
#define PL310_EVENT_COUNTER1_CONF 0x204
#define PL310_EVENT_COUNTER0_CONF 0x208
#define PL310_EVENT_COUNTER1_VAL 0x20C
#define PL310_EVENT_COUNTER0_VAL 0x210
#define PL310_INTR_MASK 0x214
#define PL310_MASKED_INTR_STAT 0x218
#define PL310_RAW_INTR_STAT 0x21C
#define PL310_INTR_CLEAR 0x220
#define PL310_CACHE_SYNC 0x730
#define PL310_INV_LINE_PA 0x770
#define PL310_INV_WAY 0x77C
#define PL310_CLEAN_LINE_PA 0x7B0
#define PL310_CLEAN_LINE_IDX 0x7B8
#define PL310_CLEAN_WAY 0x7BC
#define PL310_CLEAN_INV_LINE_PA 0x7F0
#define PL310_CLEAN_INV_LINE_IDX 0x7F8
#define PL310_CLEAN_INV_WAY 0x7FC
#define PL310_LOCKDOWN_D_WAY(x) (0x900 + ((x) * 8))
#define PL310_LOCKDOWN_I_WAY(x) (0x904 + ((x) * 8))
#define PL310_LOCKDOWN_LINE_ENABLE 0x950
#define PL310_UNLOCK_ALL_LINES_WAY 0x954
#define PL310_ADDR_FILTER_START 0xC00
#define PL310_ADDR_FILTER_END 0xC04
#define PL310_DEBUG_CTRL 0xF40
#define PL310_AUX_CTRL_MASK 0xc0000fff
#define PL310_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
#define PL310_AUX_CTRL_WAY_SIZE_SHIFT 17
#define PL310_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
#define PL310_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
#define PL310_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
#define PL310_AUX_CTRL_NS_INT_CTRL_SHIFT 27
#define PL310_AUX_CTRL_DATA_PREFETCH_SHIFT 28
#define PL310_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
#define PL310_AUX_CTRL_EARLY_BRESP_SHIFT 30
void omap4_l2cache_wbinv_range(vm_paddr_t physaddr, vm_size_t size);
void omap4_l2cache_inv_range(vm_paddr_t physaddr, vm_size_t size);
void omap4_l2cache_wb_range(vm_paddr_t physaddr, vm_size_t size);
void omap4_l2cache_wbinv_all(void);
void omap4_l2cache_inv_all(void);
void omap4_l2cache_wb_all(void);
static uint32_t g_l2cache_way_mask;
static const uint32_t g_l2cache_line_size = 32;
static const uint32_t g_l2cache_align_mask = (32 - 1);
static uint32_t g_l2cache_size;
static struct pl310_softc *pl310_softc;
/**
* pl310_read4 - read a 32-bit value from the PL310 registers
* pl310_write4 - write a 32-bit value to the PL310 registers
* @off: byte offset within the register set to read from
* @val: the value to write into the register
*
*
* LOCKING:
* None
*
* RETURNS:
* nothing for the write function; the read function returns the value read.
*/
static __inline uint32_t
pl310_read4(bus_size_t off)
{
return bus_read_4(pl310_softc->sc_mem_res, off);
}
static __inline void
pl310_write4(bus_size_t off, uint32_t val)
{
bus_write_4(pl310_softc->sc_mem_res, off, val);
}
static __inline void
pl310_wait_background_op(uint32_t off, uint32_t mask)
{
while (pl310_read4(off) & mask);
}
/**
* pl310_cache_sync - performs a cache sync operation
*
* According to the TRM:
*
* "Before writing to any other register you must perform an explicit
* Cache Sync operation. This is particularly important when the cache is
* enabled and changes to how the cache allocates new lines are to be made."
*
*
*/
static __inline void
pl310_cache_sync(void)
{
pl310_write4(PL310_CACHE_SYNC, 0);
}
static void
pl310_wbinv_all(void)
{
#if 1
pl310_write4(PL310_DEBUG_CTRL, 3);
#endif
pl310_write4(PL310_CLEAN_INV_WAY, g_l2cache_way_mask);
pl310_wait_background_op(PL310_CLEAN_INV_WAY, g_l2cache_way_mask);
pl310_cache_sync();
#if 1
pl310_write4(PL310_DEBUG_CTRL, 0);
#endif
}
static void
pl310_wbinv_range(vm_paddr_t start, vm_size_t size)
{
if (size & g_l2cache_align_mask) {
size &= ~g_l2cache_align_mask;
size += g_l2cache_line_size;
}
#if 1
pl310_write4(PL310_DEBUG_CTRL, 3);
#endif
while (size > 0) {
#if 1
/*
* Errata 588369 says that clean + inv may keep the
* cache line if it was clean; the recommended workaround
* is to clean then invalidate the cache line, with
* write-back and cache linefill disabled
*/
pl310_write4(PL310_CLEAN_LINE_PA, start);
pl310_write4(PL310_INV_LINE_PA, start);
#else
pl310_write4(PL310_CLEAN_INV_LINE_PA, start);
#endif
start += g_l2cache_line_size;
size -= g_l2cache_line_size;
}
#if 1
pl310_write4(PL310_DEBUG_CTRL, 0);
#endif
pl310_wait_background_op(PL310_CLEAN_INV_LINE_PA, 1);
pl310_cache_sync();
}
static void
pl310_wb_range(vm_paddr_t start, vm_size_t size)
{
if (size & g_l2cache_align_mask) {
size &= ~g_l2cache_align_mask;
size += g_l2cache_line_size;
}
while (size > 0) {
pl310_write4(PL310_CLEAN_LINE_PA, start);
start += g_l2cache_line_size;
size -= g_l2cache_line_size;
}
pl310_cache_sync();
pl310_wait_background_op(PL310_CLEAN_LINE_PA, 1);
}
static void
pl310_inv_range(vm_paddr_t start, vm_size_t size)
{
if (size & g_l2cache_align_mask) {
size &= ~g_l2cache_align_mask;
size += g_l2cache_line_size;
}
while (size > 0) {
pl310_write4(PL310_INV_LINE_PA, start);
start += g_l2cache_line_size;
size -= g_l2cache_line_size;
}
pl310_cache_sync();
pl310_wait_background_op(PL310_INV_LINE_PA, 1);
}
static int
pl310_probe(device_t dev)
{
if (!ofw_bus_is_compatible(dev, "arm,pl310"))
return (ENXIO);
device_set_desc(dev, "PL310 L2 cache controller");
return (0);
}
static int
pl310_attach(device_t dev)
{
struct pl310_softc *sc = device_get_softc(dev);
int rid = 0;
uint32_t aux_value;
uint32_t way_size;
uint32_t ways_assoc;
uint32_t ctrl_value;
sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->sc_mem_res == NULL)
panic("%s: Cannot map registers", device_get_name(dev));
pl310_softc = sc;
platform_init_pl310(sc);
aux_value = pl310_read4(PL310_AUX_CTRL);
way_size = (aux_value & PL310_AUX_CTRL_WAY_SIZE_MASK) >>
PL310_AUX_CTRL_WAY_SIZE_SHIFT;
way_size = 1 << (way_size + 13);
if (aux_value & (1 << PL310_AUX_CTRL_ASSOCIATIVITY_SHIFT))
ways_assoc = 16;
else
ways_assoc = 8;
g_l2cache_way_mask = (1 << ways_assoc) - 1;
g_l2cache_size = way_size * ways_assoc;
/* Print the information */
printf(" L2 Cache: %uKB/%dB %d ways\n", (g_l2cache_size / 1024),
g_l2cache_line_size, ways_assoc);
ctrl_value = pl310_read4(PL310_CTRL);
if (!(ctrl_value & 0x1)) {
/* Enable the L2 cache if disabled */
pl310_write4(PL310_CTRL, ctrl_value | 0x1);
}
pl310_wbinv_all();
/* Set the l2 functions in the set of cpufuncs */
cpufuncs.cf_l2cache_wbinv_all = pl310_wbinv_all;
cpufuncs.cf_l2cache_wbinv_range = pl310_wbinv_range;
cpufuncs.cf_l2cache_inv_range = pl310_inv_range;
cpufuncs.cf_l2cache_wb_range = pl310_wb_range;
return (0);
}
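/*
 * The AUX_CTRL way-size field above encodes way_size = 2^(field + 13)
 * bytes, so e.g. a sample field value of 3 with the 16-way associativity
 * bit set describes a 1 MB L2:
 */
#include <stdio.h>

int
main(void)
{
	unsigned field = 3;			/* sample way-size field */
	unsigned way_size = 1u << (field + 13);	/* 64 KB per way */
	unsigned ways = 16;			/* associativity bit set */

	printf("L2: %u KB\n", (way_size * ways) / 1024);	/* 1024 */
	return (0);
}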
static device_method_t pl310_methods[] = {
DEVMETHOD(device_probe, pl310_probe),
DEVMETHOD(device_attach, pl310_attach),
{0, 0},
};
static driver_t pl310_driver = {
"l2cache",
pl310_methods,
sizeof(struct pl310_softc),
};
static devclass_t pl310_devclass;
DRIVER_MODULE(pl310, simplebus, pl310_driver, pl310_devclass, 0, 0);

sys/arm/arm/pmap-v6.c Normal file

File diff suppressed because it is too large

View File

@ -3243,15 +3243,13 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
*ptep = pte;
PTE_SYNC(ptep);
if (pg != NULL) {
if (!(pg->oflags & VPO_UNMANAGED)) {
f = pmap_modify_pv(pg, pm, sva,
PVF_WRITE, 0);
if (!(pg->oflags & VPO_UNMANAGED)) {
f = pmap_modify_pv(pg, pm, sva,
PVF_WRITE, 0);
if (f & PVF_WRITE)
vm_page_dirty(pg);
} else
f = 0;
} else
f = PVF_REF | PVF_EXEC;
f = 0;
if (flush >= 0) {
flush++;

View File

@ -85,64 +85,55 @@
#include <machine/armreg.h>
__FBSDID("$FreeBSD$");
/*
* New experimental definitions of IRQdisable and IRQenable
* These keep FIQs enabled since FIQs are special.
*/
#define DOMAIN_CLIENT 0x01
#define IRQdisable \
mrs r14, cpsr ; \
orr r14, r14, #(I32_bit) ; \
msr cpsr_c, r14 ; \
#define IRQenable \
mrs r14, cpsr ; \
bic r14, r14, #(I32_bit) ; \
msr cpsr_c, r14 ; \
#ifdef _ARM_ARCH_6
#define GET_PCPU(tmp) \
mrc p15, 0, tmp, c13, c0, 4;
#else
.Lcurpcpu:
.word _C_LABEL(__pcpu)
/*
* These are used for switching the translation table/DACR.
* Since the vector page can be invalid for a short time, we must
* disable both regular IRQs *and* FIQs.
*
* XXX: This is not necessary if the vector table is relocated.
*/
#define IRQdisableALL \
mrs r14, cpsr ; \
orr r14, r14, #(I32_bit | F32_bit) ; \
msr cpsr_c, r14
#define GET_PCPU(tmp) \
ldr tmp, .Lcurpcpu
#endif
#define IRQenableALL \
mrs r14, cpsr ; \
bic r14, r14, #(I32_bit | F32_bit) ; \
msr cpsr_c, r14
.Lcurpcb:
.word _C_LABEL(__pcpu) + PC_CURPCB
.Lcpufuncs:
.word _C_LABEL(cpufuncs)
.Lblock_userspace_access:
.word _C_LABEL(block_userspace_access)
.Lcpu_do_powersave:
.word _C_LABEL(cpu_do_powersave)
.Lblocked_lock:
.word _C_LABEL(blocked_lock)
ENTRY(cpu_throw)
mov r5, r1
/*
* r0 = oldtd
* r5 = newtd
*/
ldr r7, [r5, #(TD_PCB)] /* r7 = new thread's PCB */
GET_PCPU(r7)
#ifdef ARM_VFP_SUPPORT
/*
* vfp_discard will clear pcpu->pc_vfpcthread and modify
* the control as needed.
*/
ldr r4, [r7, #(PC_VFPCTHREAD)] /* this thread using vfp? */
cmp r0, r4
bne 3f
bl _C_LABEL(vfp_discard) /* yes, shut down vfp */
3:
#endif /* ARM_VFP_SUPPORT */
ldr r7, [r5, #(TD_PCB)] /* r7 = new thread's PCB */
/* Switch to lwp0 context */
ldr r9, .Lcpufuncs
#if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B)
mov lr, pc
ldr pc, [r9, #CF_IDCACHE_WBINV_ALL]
#endif
ldr r0, [r7, #(PCB_PL1VEC)]
ldr r1, [r7, #(PCB_DACR)]
/*
@ -200,21 +191,24 @@ ENTRY(cpu_throw)
#endif
/* We have a new curthread now so make a note it */
ldr r6, .Lcurthread
GET_CURTHREAD_PTR(r6)
str r5, [r6]
/* Set the new tp */
ldr r6, [r5, #(TD_MD + MD_TP)]
#ifdef ARM_TP_ADDRESS
ldr r4, =ARM_TP_ADDRESS
str r6, [r4]
ldr r6, [r5, #(TD_MD + MD_RAS_START)]
str r6, [r4, #4] /* ARM_RAS_START */
ldr r6, [r5, #(TD_MD + MD_RAS_END)]
str r6, [r4, #8] /* ARM_RAS_END */
#else
mcr p15, 0, r6, c13, c0, 3
#endif
/* Hook in a new pcb */
ldr r6, .Lcurpcb
str r7, [r6]
GET_PCPU(r6)
str r7, [r6, #PC_CURPCB]
ldmfd sp!, {r4-r7, pc}
@ -226,22 +220,15 @@ ENTRY(cpu_switch)
/* rem: r0 = old lwp */
/* rem: interrupts are disabled */
#ifdef MULTIPROCESSOR
/* XXX use curcpu() */
ldr r2, .Lcpu_info_store
str r2, [r6, #(L_CPU)]
#endif
/* Process is now on a processor. */
/* We have a new curthread now so make a note it */
ldr r7, .Lcurthread
GET_CURTHREAD_PTR(r7)
str r1, [r7]
/* Hook in a new pcb */
ldr r7, .Lcurpcb
GET_PCPU(r7)
ldr r2, [r1, #TD_PCB]
str r2, [r7]
str r2, [r7, #PC_CURPCB]
/* rem: r1 = new process */
/* rem: interrupts are enabled */
@ -267,6 +254,7 @@ ENTRY(cpu_switch)
* NOTE: We can now use r8-r13 until it is time to restore
* them for the new process.
*/
#ifdef ARM_TP_ADDRESS
/* Store the old tp */
ldr r3, =ARM_TP_ADDRESS
ldr r9, [r3]
@ -283,12 +271,19 @@ ENTRY(cpu_switch)
str r9, [r3, #4]
ldr r9, [r1, #(TD_MD + MD_RAS_END)]
str r9, [r3, #8]
#else
/* Store the old tp */
mrc p15, 0, r9, c13, c0, 3
str r9, [r0, #(TD_MD + MD_TP)]
/* Set the new tp */
ldr r9, [r1, #(TD_MD + MD_TP)]
mcr p15, 0, r9, c13, c0, 3
#endif
/* Get the user structure for the new process in r9 */
ldr r9, [r1, #(TD_PCB)]
/* r1 now free! */
mrs r3, cpsr
/*
* We can do that, since
@ -300,15 +295,39 @@ ENTRY(cpu_switch)
str sp, [r2, #(PCB_UND_SP)]
msr cpsr_c, r3 /* Restore the old mode */
/* rem: r8 = old PCB */
/* rem: r2 = old PCB */
/* rem: r9 = new PCB */
/* rem: interrupts are enabled */
/* What else needs to be saved? Only FPA stuff, when that is supported */
#ifdef ARM_VFP_SUPPORT
/*
* vfp_store will clear pcpu->pc_vfpcthread, save
* registers and state, and modify the control as needed.
* a future exception will bounce the backup settings in the fp unit.
* XXX vfp_store can't change r4
*/
GET_PCPU(r7)
ldr r8, [r7, #(PC_VFPCTHREAD)]
cmp r4, r8 /* old thread used vfp? */
bne 1f /* no, don't save */
cmp r1, r4 /* same thread ? */
beq 1f /* yes, skip vfp store */
#ifdef SMP
ldr r8, [r7, #(PC_CPU)] /* last used on this cpu? */
ldr r3, [r2, #(PCB_VFPCPU)]
cmp r8, r3 /* last cpu to use these registers? */
bne 1f /* no. these values are stale */
#endif
add r0, r2, #(PCB_VFPSTATE)
bl _C_LABEL(vfp_store)
1:
#endif /* ARM_VFP_SUPPORT */
/* r1 now free! */
/* Third phase : restore saved context */
/* rem: r8 = old PCB */
/* rem: r2 = old PCB */
/* rem: r9 = new PCB */
/* rem: interrupts are enabled */
@ -333,6 +352,7 @@ ENTRY(cpu_switch)
cmpeq r0, r5 /* Same DACR? */
beq .Lcs_context_switched /* yes! */
#if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B)
/*
* Definitely need to flush the cache.
*/
@ -340,6 +360,7 @@ ENTRY(cpu_switch)
ldr r1, .Lcpufuncs
mov lr, pc
ldr pc, [r1, #CF_IDCACHE_WBINV_ALL]
#endif
.Lcs_cache_purge_skipped:
/* rem: r6 = lock */
/* rem: r9 = new PCB */
@ -408,8 +429,7 @@ ENTRY(cpu_switch)
/* Release the old thread */
str r6, [r4, #TD_LOCK]
ldr r6, .Lblocked_lock
ldr r3, .Lcurthread
ldr r3, [r3]
GET_CURTHREAD_PTR(r3)
1:
ldr r4, [r3, #TD_LOCK]
@ -484,6 +504,27 @@ ENTRY(savectx)
/* Store all the registers in the process's pcb */
add r2, r0, #(PCB_R8)
stmia r2, {r8-r13}
#ifdef ARM_VFP_SUPPORT
/*
* vfp_store will clear pcpu->pc_vfpcthread, save
* registers and state, and modify the control as needed.
* a future exception will bounce the backup settings in the fp unit.
*/
GET_PCPU(r7)
ldr r4, [r7, #(PC_VFPCTHREAD)] /* vfp thread */
ldr r2, [r7, #(PC_CURTHREAD)] /* current thread */
cmp r4, r2
bne 1f
#ifdef SMP
ldr r2, [r7, #(PC_CPU)] /* last used on this cpu? */
ldr r3, [r0, #(PCB_VFPCPU)]
cmp r2, r3
bne 1f /* no. these values are stale */
#endif
add r0, r0, #(PCB_VFPSTATE)
bl _C_LABEL(vfp_store)
1:
#endif /* ARM_VFP_SUPPORT */
ldmfd sp!, {r4-r7, pc}
ENTRY(fork_trampoline)

View File

@ -88,7 +88,14 @@ static int
arm32_set_tp(struct thread *td, void *args)
{
td->td_md.md_tp = (register_t)args;
if (td != curthread)
td->td_md.md_tp = (register_t)args;
else
#ifndef ARM_TP_ADDRESS
set_tls(args);
#else
*(register_t *)ARM_TP_ADDRESS = (register_t)args;
#endif
return (0);
}
@ -96,7 +103,14 @@ static int
arm32_get_tp(struct thread *td, void *args)
{
td->td_retval[0] = td->td_md.md_tp;
if (td != curthread)
td->td_retval[0] = td->td_md.md_tp;
else
#ifndef ARM_TP_ADDRESS
td->td_retval[0] = (register_t)get_tls();
#else
td->td_retval[0] = *(register_t *)ARM_TP_ADDRESS;
#endif
return (0);
}

View File

@ -237,10 +237,16 @@ undefinedinstruction(trapframe_t *frame)
* instruction trap.
*/
coprocessor = 0;
if ((fault_instruction & (1 << 27)) != 0)
coprocessor = (fault_instruction >> 8) & 0x0f;
else
coprocessor = 0;
#ifdef ARM_VFP_SUPPORT
else { /* check for special instructions */
if (((fault_instruction & 0xfe000000) == 0xf2000000) ||
((fault_instruction & 0xff100000) == 0xf4000000))
coprocessor = 10; /* vfp / simd */
}
#endif /* ARM_VFP_SUPPORT */
if ((frame->tf_spsr & PSR_MODE) == PSR_USR32_MODE) {
/*

260
sys/arm/arm/vfp.c Normal file
View File

@ -0,0 +1,260 @@
/*
* Copyright (c) 2012 Mark Tinguely
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <machine/fp.h>
#include <machine/pcb.h>
#include <machine/undefined.h>
#include <machine/vfp.h>
/* function prototypes */
unsigned int get_coprocessorACR(void);
int vfp_bounce(u_int, u_int, struct trapframe *, int);
void vfp_discard(void);
void vfp_enable(void);
void vfp_init(void);
void vfp_restore(struct vfp_state *);
void vfp_store(struct vfp_state *);
void set_coprocessorACR(u_int);
boolean_t vfp_exists;
static struct undefined_handler vfp10_uh, vfp11_uh;
/* The FMXR instruction, issued via generic coprocessor commands */
#define fmxr(reg, val) \
__asm __volatile("mcr p10, 7, %0, " #reg " , c0, 0" :: "r" (val));
/* The FMRX instruction, issued via generic coprocessor commands */
#define fmrx(reg) \
({ u_int val = 0;\
__asm __volatile("mrc p10, 7, %0, " #reg " , c0, 0" : "=r" (val));\
val; \
})
u_int
get_coprocessorACR(void)
{
u_int val;
__asm __volatile("mrc p15, 0, %0, c1, c0, 2" : "=r" (val) : : "cc");
return val;
}
void
set_coprocessorACR(u_int val)
{
__asm __volatile("mcr p15, 0, %0, c1, c0, 2\n\t"
"isb\n\t"
: : "r" (val) : "cc");
}
/* called for each cpu */
void
vfp_init(void)
{
u_int fpsid, fpexc, tmp;
u_int coproc;
coproc = get_coprocessorACR();
coproc |= COPROC10 | COPROC11;
set_coprocessorACR(coproc);
fpsid = fmrx(cr0); /* read the vfp system id */
fpexc = fmrx(cr8); /* read the vfp exception reg */
if (!(fpsid & VFPSID_HARDSOFT_IMP)) {
vfp_exists = 1;
PCPU_SET(vfpsid, fpsid); /* save the VFPSID */
if ((fpsid & VFPSID_SUBVERSION2_MASK) == VFP_ARCH3) {
tmp = fmrx(cr7); /* extended registers */
PCPU_SET(vfpmvfr0, tmp);
tmp = fmrx(cr6); /* extended registers */
PCPU_SET(vfpmvfr1, tmp);
}
/* initialize the coprocessor 10 and 11 calls
* These are called to restore the registers and enable
* the VFP hardware.
*/
if (vfp10_uh.uh_handler == NULL) {
vfp10_uh.uh_handler = vfp_bounce;
vfp11_uh.uh_handler = vfp_bounce;
install_coproc_handler_static(10, &vfp10_uh);
install_coproc_handler_static(11, &vfp11_uh);
}
}
}
SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);
/* start VFP unit, restore the vfp registers from the PCB and retry
* the instruction
*/
int
vfp_bounce(u_int addr, u_int insn, struct trapframe *frame, int code)
{
u_int fpexc;
struct pcb *curpcb;
struct thread *vfptd;
if (!vfp_exists)
return 1; /* vfp does not exist */
fpexc = fmrx(cr8); /* read the vfp exception reg */
if (fpexc & VFPEXC_EN) {
vfptd = PCPU_GET(vfpcthread);
/* Did the kernel call the vfp, or did an exception expect us
* to emulate the instruction? Newer hardware does not require
* emulation, so we don't emulate yet.
*/
#ifdef SMP
/* don't save if newer registers are on another processor */
if (vfptd /* && (vfptd == curthread) */ &&
(vfptd->td_pcb->pcb_vfpcpu == PCPU_GET(vfpcpu))
#else
/* someone did not save their registers, */
if (vfptd /* && (vfptd == curthread) */)
#endif
vfp_store(&vfptd->td_pcb->pcb_vfpstate);
fpexc &= ~VFPEXC_EN;
fmxr(cr8, fpexc); /* turn vfp hardware off */
if (vfptd == curthread) {
/* kill the process - we do not handle emulation */
killproc(curthread->td_proc, "vfp emulation");
return 1;
}
/* should not happen. someone did not save their context */
printf("vfp_bounce: vfpcthread: %p curthread: %p\n",
vfptd, curthread);
}
fpexc |= VFPEXC_EN;
fmxr(cr8, fpexc); /* enable the vfp and repeat command */
curpcb = PCPU_GET(curpcb);
/* If we were the last process to use the VFP and the process did not
* use the VFP on another processor, then the registers in the VFP
* will still be ours and are current. Eventually, we will make the
* restore smarter.
*/
vfp_restore(&curpcb->pcb_vfpstate);
#ifdef SMP
curpcb->pcb_cpu = PCPU_GET(cpu);
#endif
PCPU_SET(vfpcthread, PCPU_GET(curthread));
return 0;
}
/* vfp_restore is called from a VFP command to restore the registers and
* turn on the VFP hardware.
* Eventually we will use the information that this process was the last
* to use the VFP hardware and bypass the restore, just turn on the hardware.
*/
void
vfp_restore(struct vfp_state *vfpsave)
{
u_int vfpscr = 0;
if (vfpsave) {
__asm __volatile("ldc p10, c0, [%0], #128\n" /* d0-d31 */
#ifndef VFPv2
"ldcl p11, c0, [%0], #128\n" /* d16-d31 */
#else
"add %0, %0, #128\n" /* slip missing regs */
#endif
"ldr %1, [%0]\n" /* set old vfpscr */
"mcr p10, 7, %1, cr1, c0, 0\n"
:: "r" (vfpsave), "r" (vfpscr));
PCPU_SET(vfpcthread, PCPU_GET(curthread));
}
}
/* vfp_store is called from cpu_switch to save the vfp hardware registers
* into the pcb before switching to another process.
* we already know that the new process is different from this old
* process and that this process last used the VFP registers.
* Below we check to see if the VFP has been enabled since the last
* register save.
* This routine will exit with the VFP turned off. The next VFP user
* will trap to restore its registers and turn on the VFP hardware.
*/
void
vfp_store(struct vfp_state *vfpsave)
{
u_int tmp, vfpscr = 0;
tmp = fmrx(cr8); /* Is the vfp enabled? */
if (vfpsave && tmp & VFPEXC_EN) {
__asm __volatile("stc p11, c0, [%1], #128\n" /* d0-d31 */
#ifndef VFPv2
"stcl p11, c0, [%1], #128\n"
#else
"add %1, %1, #128\n"
#endif
"mrc p10, 7, %0, cr1, c0, 0\n"
"str %0, [%1]\n"
: "=&r" (vfpscr) : "r" (vfpsave));
}
#ifndef SMP
/* eventually we will use this information for UP also */
PCPU_SET(vfpcthread, 0);
#endif
tmp &= ~VFPEXC_EN; /* disable the vfp hardware */
fmxr(cr8 , tmp);
}
/* discard the registers at cpu_thread_free() when fpcurthread == td.
* Turn off the VFP hardware.
*/
void
vfp_discard()
{
u_int tmp = 0;
PCPU_SET(vfpcthread, 0); /* permanently forget about the regs */
tmp = fmrx(cr8);
tmp &= ~VFPEXC_EN; /* turn off VFP hardware */
fmxr(cr8, tmp);
}
/* Enable the VFP hardware without restoring registers.
* Called when the registers are still in the VFP unit
*/
void
vfp_enable()
{
u_int tmp = 0;
tmp = fmrx(cr8);
tmp |= VFPEXC_EN;
fmxr(cr8 , tmp);
}

View File

@ -146,7 +146,11 @@ cpu_fork(register struct thread *td1, register struct proc *p2,
/* Setup to release spin count in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
td2->td_md.md_saved_cspr = 0;
#ifdef ARM_TP_ADDRESS
td2->td_md.md_tp = *(register_t *)ARM_TP_ADDRESS;
#else
td2->td_md.md_tp = (register_t) get_tls();
#endif
}
void
@ -369,11 +373,14 @@ int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
if (td != curthread)
td->td_md.md_tp = (register_t)tls_base;
else {
td->td_md.md_tp = (register_t)tls_base;
if (td == curthread) {
critical_enter();
#ifdef ARM_TP_ADDRESS
*(register_t *)ARM_TP_ADDRESS = (register_t)tls_base;
#else
set_tls((void *)tls_base);
#endif
critical_exit();
}
return (0);
@ -485,7 +492,11 @@ arm_remap_nocache(void *addr, vm_size_t size)
for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
vaddr += PAGE_SIZE, physaddr += PAGE_SIZE, i++) {
cpu_idcache_wbinv_range(vaddr, PAGE_SIZE);
#ifdef ARM_L2_PIPT
cpu_l2cache_wbinv_range(physaddr, PAGE_SIZE);
#else
cpu_l2cache_wbinv_range(vaddr, PAGE_SIZE);
#endif
pmap_kenter_nocache(tomap, physaddr);
cpu_tlb_flushID_SE(vaddr);
arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %

View File

@ -118,9 +118,6 @@ extern u_int undefined_handler_address;
struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];
struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;
/* Physical and virtual addresses for some global pages */
vm_paddr_t phys_avail[10];
@ -471,8 +468,7 @@ initarm(struct arm_boot_params *abp)
lastaddr = parse_boot_param(abp);
set_cpufuncs();
pcpu_init(pcpup, 0, sizeof(struct pcpu));
PCPU_SET(curthread, &thread0);
pcpu0_init();
/* Do basic tuning, hz etc */
init_param1();

View File

@ -4,6 +4,7 @@ files "../at91/files.at91"
cpu CPU_ARM9
makeoptions CONF_CFLAGS=-mcpu=arm9
options PHYSADDR=0x20000000
options NO_EVENTTIMERS
# For now, just do the AT91RM9200
device at91rm9200

View File

@ -103,9 +103,6 @@ extern u_int undefined_handler_address;
struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];
struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;
/* Physical and virtual addresses for some global pages */
vm_paddr_t phys_avail[10];
@ -191,8 +188,7 @@ initarm(struct arm_boot_params *abp)
boothowto = RB_VERBOSE;
lastaddr = parse_boot_param(abp);
set_cpufuncs();
pcpu_init(pcpup, 0, sizeof(struct pcpu));
PCPU_SET(curthread, &thread0);
pcpu0_init();
/* Do basic tuning, hz etc */
init_param1();

View File

@ -12,3 +12,5 @@ options KERNVIRTADDR=0xc1000000 # Used in ldscript.arm
options FLASHADDR=0xD0000000
options LOADERRAMADDR=0x00000000
options STARTUP_PAGETABLE_ADDR=0x00100000
options NO_EVENTTIMERS

View File

@ -93,6 +93,7 @@
#define CPU_ID_ARCH_V5TE 0x00050000
#define CPU_ID_ARCH_V5TEJ 0x00060000
#define CPU_ID_ARCH_V6 0x00070000
#define CPU_ID_CPUID_SCHEME 0x000f0000
#define CPU_ID_VARIANT_MASK 0x00f00000
/* Next three nybbles are part number */
@ -145,12 +146,36 @@
#define CPU_ID_ARM1026EJS 0x4106a260
#define CPU_ID_ARM1136JS 0x4107b360
#define CPU_ID_ARM1136JSR1 0x4117b360
#define CPU_ID_CORTEXA8R1 0x411fc080
#define CPU_ID_CORTEXA8R2 0x412fc080
#define CPU_ID_CORTEXA8R3 0x413fc080
#define CPU_ID_CORTEXA9R1 0x411fc090
#define CPU_ID_CORTEXA9R2 0x412fc090
#define CPU_ID_SA110 0x4401a100
#define CPU_ID_SA1100 0x4401a110
#define CPU_ID_TI925T 0x54029250
#define CPU_ID_MV88FR131 0x56251310 /* Marvell Feroceon 88FR131 Core */
#define CPU_ID_MV88FR331 0x56153310 /* Marvell Feroceon 88FR331 Core */
#define CPU_ID_MV88FR571_VD 0x56155710 /* Marvell Feroceon 88FR571-VD Core (ID from datasheet) */
#define CPU_ID_MV88FR571_41 0x41159260 /* Marvell Feroceon 88FR571-VD Core (actual ID from CPU reg) */
/*
 * The LokiPlus core also has its ID set to 0x41159260, and this define would
 * cause unsupported L2-cache instructions to be executed on it, so it has to
 * be disabled there. 0x41159260 is a generic ARM926EJ-S ID.
*/
#ifdef SOC_MV_LOKIPLUS
#define CPU_ID_MV88FR571_41 0x00000000
#else
#define CPU_ID_MV88FR571_41 0x41159260 /* Marvell Feroceon 88FR571-VD Core (actual ID from CPU reg) */
#endif
#define CPU_ID_MV88SV581X_V6 0x560F5810 /* Marvell Sheeva 88SV581x v6 Core */
#define CPU_ID_MV88SV581X_V7 0x561F5810 /* Marvell Sheeva 88SV581x v7 Core */
#define CPU_ID_MV88SV584X 0x561F5840 /* Marvell Sheeva 88SV584x v6 Core */
/* Marvell's CPUIDs with ARM ID in implementor field */
#define CPU_ID_ARM_88SV581X_V6 0x410fb760 /* Marvell Sheeva 88SV581x v6 Core */
#define CPU_ID_ARM_88SV581X_V7 0x413FC080 /* Marvell Sheeva 88SV581x v7 Core */
#define CPU_ID_ARM_88SV584X 0x410FB024 /* Marvell Sheeva 88SV584x v6 Core */
#define CPU_ID_FA526 0x66015260
#define CPU_ID_FA626TE 0x66056260
#define CPU_ID_SA1110 0x6901b110
@ -191,6 +216,20 @@
#define ARM3_CTL_SHARED 0x00000002
#define ARM3_CTL_MONITOR 0x00000004
/* CPUID registers */
#define ARM_PFR0_ARM_ISA_MASK 0x0000000f
#define ARM_PFR0_THUMB_MASK 0x000000f0
#define ARM_PFR0_THUMB 0x10
#define ARM_PFR0_THUMB2 0x30
#define ARM_PFR0_JAZELLE_MASK 0x00000f00
#define ARM_PFR0_THUMBEE_MASK 0x0000f000
#define ARM_PFR1_ARMV4_MASK 0x0000000f
#define ARM_PFR1_SEC_EXT_MASK 0x000000f0
#define ARM_PFR1_MICROCTRL_MASK 0x00000f00
/*
* Post-ARM3 CP15 registers:
*
@ -244,6 +283,7 @@
#define CPU_CONTROL_VECRELOC 0x00002000 /* V: Vector relocation */
#define CPU_CONTROL_ROUNDROBIN 0x00004000 /* RR: Predictable replacement */
#define CPU_CONTROL_V4COMPAT 0x00008000 /* L4: ARMv4 compat LDR R15 etc */
#define CPU_CONTROL_V6_EXTPAGE 0x00800000 /* XP: ARMv6 extended page tables */
#define CPU_CONTROL_L2_ENABLE 0x04000000 /* L2 Cache enabled */
#define CPU_CONTROL_IDC_ENABLE CPU_CONTROL_DC_ENABLE
@ -260,23 +300,24 @@
/* Xscale Core 3 only */
#define XSCALE_AUXCTL_LLR 0x00000400 /* Enable L2 for LLR Cache */
/* Marvell Feroceon Extra Features Register (CP15 register 1, opcode2 0) */
#define FC_DCACHE_REPL_LOCK 0x80000000 /* Replace DCache Lock */
#define FC_DCACHE_STREAM_EN 0x20000000 /* DCache Streaming Switch */
#define FC_WR_ALLOC_EN 0x10000000 /* Enable Write Allocate */
#define FC_L2_PREF_DIS 0x01000000 /* L2 Cache Prefetch Disable */
#define FC_L2_INV_EVICT_LINE 0x00800000 /* L2 Invalidates Uncorrectable Error Line Eviction */
#define FC_L2CACHE_EN 0x00400000 /* L2 enable */
#define FC_ICACHE_REPL_LOCK 0x00080000 /* Replace ICache Lock */
#define FC_GLOB_HIST_REG_EN 0x00040000 /* Branch Global History Register Enable */
#define FC_BRANCH_TARG_BUF_DIS 0x00020000 /* Branch Target Buffer Disable */
#define FC_L1_PAR_ERR_EN 0x00010000 /* L1 Parity Error Enable */
/* Marvell Extra Features Register (CP15 register 1, opcode2 0) */
#define MV_DC_REPLACE_LOCK 0x80000000 /* Replace DCache Lock */
#define MV_DC_STREAM_ENABLE 0x20000000 /* DCache Streaming Switch */
#define MV_WA_ENABLE 0x10000000 /* Enable Write Allocate */
#define MV_L2_PREFETCH_DISABLE 0x01000000 /* L2 Cache Prefetch Disable */
#define MV_L2_INV_EVICT_ERR 0x00800000 /* L2 Invalidates Uncorrectable Error Line Eviction */
#define MV_L2_ENABLE 0x00400000 /* L2 Cache enable */
#define MV_IC_REPLACE_LOCK 0x00080000 /* Replace ICache Lock */
#define MV_BGH_ENABLE 0x00040000 /* Branch Global History Register Enable */
#define MV_BTB_DISABLE 0x00020000 /* Branch Target Buffer Disable */
#define MV_L1_PARERR_ENABLE 0x00010000 /* L1 Parity Error Enable */
/* Cache type register definitions */
#define CPU_CT_ISIZE(x) ((x) & 0xfff) /* I$ info */
#define CPU_CT_DSIZE(x) (((x) >> 12) & 0xfff) /* D$ info */
#define CPU_CT_S (1U << 24) /* split cache */
#define CPU_CT_CTYPE(x) (((x) >> 25) & 0xf) /* cache type */
#define CPU_CT_FORMAT(x) ((x) >> 29)
#define CPU_CT_CTYPE_WT 0 /* write-through */
#define CPU_CT_CTYPE_WB1 1 /* write-back, clean w/ read */
@ -289,6 +330,27 @@
#define CPU_CT_xSIZE_ASSOC(x) (((x) >> 3) & 0x7) /* associativity */
#define CPU_CT_xSIZE_SIZE(x) (((x) >> 6) & 0x7) /* size */
#define CPU_CT_ARMV7 0x4
/* ARM v7 Cache type definitions */
#define CPUV7_CT_CTYPE_WT (1 << 31)
#define CPUV7_CT_CTYPE_WB (1 << 30)
#define CPUV7_CT_CTYPE_RA (1 << 29)
#define CPUV7_CT_CTYPE_WA (1 << 28)
#define CPUV7_CT_xSIZE_LEN(x) ((x) & 0x7) /* line size */
#define CPUV7_CT_xSIZE_ASSOC(x) (((x) >> 3) & 0x3ff) /* associativity */
#define CPUV7_CT_xSIZE_SET(x) (((x) >> 13) & 0x7fff) /* num sets */
#define CPU_CLIDR_CTYPE(reg,x) (((reg) >> ((x) * 3)) & 0x7)
#define CPU_CLIDR_LOUIS(reg) (((reg) >> 21) & 0x7)
#define CPU_CLIDR_LOC(reg) (((reg) >> 24) & 0x7)
#define CPU_CLIDR_LOUU(reg) (((reg) >> 27) & 0x7)
#define CACHE_ICACHE 1
#define CACHE_DCACHE 2
#define CACHE_SEP_CACHE 3
#define CACHE_UNI_CACHE 4
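
/* The CLIDR helpers above are what the new identify code uses to size the
 * ARMv7 cache hierarchy. As a hedged illustration of how they compose (the
 * real walk lives in the updated cpufunc identify path, not here): */

/*
 * Sketch: find the first cache level that holds data, walking the
 * CLIDR with the accessors above (illustrative only).
 */
static int
sketch_first_dcache_level(u_int clidr)
{
	int level;

	for (level = 0; level < 7; level++) {
		switch (CPU_CLIDR_CTYPE(clidr, level)) {
		case CACHE_DCACHE:
		case CACHE_SEP_CACHE:
		case CACHE_UNI_CACHE:
			return (level);
		}
	}
	return (-1);	/* no data cache implemented */
}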
/* Fault status register definitions */
#define FAULT_TYPE_MASK 0x0f

View File

@ -130,45 +130,52 @@
.stabs __STRING(_/**/sym),1,0,0,0
#endif /* __STDC__ */
/* Exactly one of the __ARM_ARCH_*__ macros will be defined by the compiler. */
/* The _ARM_ARCH_* macros are deprecated and will be removed soon. */
/* This should be moved into another header so it can be used in
* both asm and C code. machine/asm.h cannot be included in C code. */
#if defined (__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
#define _ARM_ARCH_7
#define _HAVE_ARMv7_INSTRUCTIONS 1
#endif
#if defined (__ARM_ARCH_6__) || defined (__ARM_ARCH_6J__)
#if defined (_HAVE_ARMv7_INSTRUCTIONS) || defined (__ARM_ARCH_6__) || \
defined (__ARM_ARCH_6J__) || defined (__ARM_ARCH_6K__) || \
defined (__ARM_ARCH_6Z__) || defined (__ARM_ARCH_6ZK__)
#define _ARM_ARCH_6
#define _HAVE_ARMv6_INSTRUCTIONS 1
#endif
#if defined (_ARM_ARCH_6) || defined (__ARM_ARCH_5__) || \
defined (__ARM_ARCH_5T__) || defined (__ARM_ARCH_5TE__) || \
#if defined (_HAVE_ARMv6_INSTRUCTIONS) || defined (__ARM_ARCH_5TE__) || \
defined (__ARM_ARCH_5TEJ__) || defined (__ARM_ARCH_5E__)
#define _ARM_ARCH_5
#endif
#if defined (_ARM_ARCH_6) || defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__) || defined(__ARM_ARCH_5E__)
#define _ARM_ARCH_5E
#define _HAVE_ARMv5E_INSTRUCTIONS 1
#endif
#if defined (_ARM_ARCH_5) || defined (__ARM_ARCH_4T__)
#if defined (_HAVE_ARMv5E_INSTRUCTIONS) || defined (__ARM_ARCH_5__) || \
defined (__ARM_ARCH_5T__)
#define _ARM_ARCH_5
#define _HAVE_ARMv5_INSTRUCTIONS 1
#endif
#if defined (_HAVE_ARMv5_INSTRUCTIONS) || defined (__ARM_ARCH_4T__)
#define _ARM_ARCH_4T
#define _HAVE_ARMv4T_INSTRUCTIONS 1
#endif
/* FreeBSD requires ARMv4, so this is always set. */
#define _HAVE_ARMv4_INSTRUCTIONS 1
#if defined (_ARM_ARCH_4T)
#if defined (_HAVE_ARMv4T_INSTRUCTIONS)
# define RET bx lr
# define RETeq bxeq lr
# define RETne bxne lr
# ifdef __STDC__
# define RETc(c) bx##c lr
# else
# define RETc(c) bx/**/c lr
# endif
# define RETc(c) bx##c lr
#else
# define RET mov pc, lr
# define RETeq moveq pc, lr
# define RETne movne pc, lr
# ifdef __STDC__
# define RETc(c) mov##c pc, lr
# else
# define RETc(c) mov/**/c pc, lr
# endif
# define RETc(c) mov##c pc, lr
#endif
#endif /* !_MACHINE_ASM_H_ */

View File

@ -40,9 +40,12 @@
#ifndef _MACHINE_ASMACROS_H_
#define _MACHINE_ASMACROS_H_
#include <machine/asm.h>
#ifdef _KERNEL
#ifdef LOCORE
#include "opt_global.h"
/*
* ASM macros for pushing and pulling trapframes from the stack
@ -58,7 +61,7 @@
 * NOTE: r13 and r14 are stored separately as a workaround for the
* SA110 rev 2 STM^ bug
*/
#ifdef ARM_TP_ADDRESS
#define PUSHFRAME \
str lr, [sp, #-4]!; /* Push the return address */ \
sub sp, sp, #(4*17); /* Adjust the stack pointer */ \
@ -73,12 +76,24 @@
str r1, [r0]; \
mov r1, #0xffffffff; \
str r1, [r0, #4];
#else
#define PUSHFRAME \
str lr, [sp, #-4]!; /* Push the return address */ \
sub sp, sp, #(4*17); /* Adjust the stack pointer */ \
stmia sp, {r0-r12}; /* Push the user mode registers */ \
add r0, sp, #(4*13); /* Adjust the stack pointer */ \
stmia r0, {r13-r14}^; /* Push the user mode registers */ \
mov r0, r0; /* NOP for previous instruction */ \
mrs r0, spsr_all; /* Put the SPSR on the stack */ \
str r0, [sp, #-4]!;
#endif
/*
* PULLFRAME - macro to pull a trap frame from the stack in the current mode
* Since the current mode is used, the SVC lr field is ignored.
*/
#ifdef ARM_TP_ADDRESS
#define PULLFRAME \
ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \
msr spsr_all, r0; \
@ -86,6 +101,16 @@
mov r0, r0; /* NOP for previous instruction */ \
add sp, sp, #(4*17); /* Adjust the stack pointer */ \
ldr lr, [sp], #0x0004; /* Pull the return address */
#else
#define PULLFRAME \
ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \
msr spsr_all, r0; \
clrex; \
ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
mov r0, r0; /* NOP for previous instruction */ \
add sp, sp, #(4*17); /* Adjust the stack pointer */ \
ldr lr, [sp], #0x0004; /* Pull the return address */
#endif
/*
* PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode
@ -97,7 +122,7 @@
 * NOTE: r13 and r14 are stored separately as a workaround for the
* SA110 rev 2 STM^ bug
*/
#ifdef ARM_TP_ADDRESS
#define PUSHFRAMEINSVC \
stmdb sp, {r0-r3}; /* Save 4 registers */ \
mov r0, lr; /* Save xxx32 r14 */ \
@ -132,6 +157,30 @@
strhi r3, [r0, #16]; /* the RAS_START location. */ \
mrs r0, spsr_all; \
str r0, [sp, #-4]!
#else
#define PUSHFRAMEINSVC \
stmdb sp, {r0-r3}; /* Save 4 registers */ \
mov r0, lr; /* Save xxx32 r14 */ \
mov r1, sp; /* Save xxx32 sp */ \
mrs r3, spsr; /* Save xxx32 spsr */ \
mrs r2, cpsr; /* Get the CPSR */ \
bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \
orr r2, r2, #(PSR_SVC32_MODE); \
msr cpsr_c, r2; /* Punch into SVC mode */ \
mov r2, sp; /* Save SVC sp */ \
str r0, [sp, #-4]!; /* Push return address */ \
str lr, [sp, #-4]!; /* Push SVC lr */ \
str r2, [sp, #-4]!; /* Push SVC sp */ \
msr spsr_all, r3; /* Restore correct spsr */ \
ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \
sub sp, sp, #(4*15); /* Adjust the stack pointer */ \
stmia sp, {r0-r12}; /* Push the user mode registers */ \
add r0, sp, #(4*13); /* Adjust the stack pointer */ \
stmia r0, {r13-r14}^; /* Push the user mode registers */ \
mov r0, r0; /* NOP for previous instruction */ \
mrs r0, spsr_all; /* Put the SPSR on the stack */ \
str r0, [sp, #-4]!
#endif
/*
* PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack
@ -140,6 +189,7 @@
* exit.
*/
#ifdef ARM_TP_ADDRESS
#define PULLFRAMEFROMSVCANDEXIT \
ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \
msr spsr_all, r0; /* restore SPSR */ \
@ -147,6 +197,16 @@
mov r0, r0; /* NOP for previous instruction */ \
add sp, sp, #(4*15); /* Adjust the stack pointer */ \
ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */
#else
#define PULLFRAMEFROMSVCANDEXIT \
ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \
msr spsr_all, r0; /* restore SPSR */ \
clrex; \
ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
mov r0, r0; /* NOP for previous instruction */ \
add sp, sp, #(4*15); /* Adjust the stack pointer */ \
ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */
#endif
#define DATA(name) \
.data ; \
@ -155,9 +215,20 @@
.type name, %object ; \
name:
#define EMPTY
#ifdef _ARM_ARCH_6
#define AST_LOCALS
#define GET_CURTHREAD_PTR(tmp) \
mrc p15, 0, tmp, c13, c0, 4; \
add tmp, tmp, #(PC_CURTHREAD)
#else
#define AST_LOCALS ;\
.Lcurthread: ;\
.word _C_LABEL(__pcpu) + PC_CURTHREAD
#define GET_CURTHREAD_PTR(tmp) \
ldr tmp, .Lcurthread
#endif
#define DO_AST \
ldr r0, [sp] /* Get the SPSR from stack */ ;\
mrs r4, cpsr /* save CPSR */ ;\
@ -167,7 +238,7 @@
teq r0, #(PSR_USR32_MODE) ;\
bne 2f /* Nope, get out now */ ;\
bic r4, r4, #(I32_bit|F32_bit) ;\
1: ldr r5, .Lcurthread ;\
1: GET_CURTHREAD_PTR(r5) ;\
ldr r5, [r5] ;\
ldr r1, [r5, #(TD_FLAGS)] ;\
and r1, r1, #(TDF_ASTPENDING|TDF_NEEDRESCHED) ;\
@ -181,11 +252,6 @@
b 1b ;\
2:
#define AST_LOCALS ;\
.Lcurthread: ;\
.word _C_LABEL(__pcpu) + PC_CURTHREAD
#endif /* LOCORE */
#endif /* _KERNEL */

View File

@ -39,17 +39,17 @@
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_
#ifndef _LOCORE
#include <sys/types.h>
#ifndef _KERNEL
#include <machine/sysarch.h>
#else
#include <machine/cpuconf.h>
#endif
#define mb()
#define wmb()
#define rmb()
#define mb()
#define wmb()
#define rmb()
#ifndef I32_bit
#define I32_bit (1 << 7) /* IRQ disable */
@ -58,6 +58,356 @@
#define F32_bit (1 << 6) /* FIQ disable */
#endif
/*
* It would be nice to use _HAVE_ARMv6_INSTRUCTIONS from machine/asm.h
* here, but that header can't be included here because this is C
* code. I would like to move the _HAVE_ARMv6_INSTRUCTIONS definition
* out of asm.h so it can be used in both asm and C code. - kientzle@
*/
#if defined (__ARM_ARCH_7__) || \
defined (__ARM_ARCH_7A__) || \
defined (__ARM_ARCH_6__) || \
defined (__ARM_ARCH_6J__) || \
defined (__ARM_ARCH_6K__) || \
defined (__ARM_ARCH_6Z__) || \
defined (__ARM_ARCH_6ZK__)
static __inline void
__do_dmb(void)
{
#if defined (__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
__asm __volatile("dmb" : : : "memory");
#else
__asm __volatile("mcr p15, 0, r0, c7, c10, 5" : : : "memory");
#endif
}
#define ATOMIC_ACQ_REL_LONG(NAME) \
static __inline void \
atomic_##NAME##_acq_long(__volatile u_long *p, u_long v) \
{ \
atomic_##NAME##_long(p, v); \
__do_dmb(); \
} \
\
static __inline void \
atomic_##NAME##_rel_long(__volatile u_long *p, u_long v) \
{ \
__do_dmb(); \
atomic_##NAME##_long(p, v); \
}
#define ATOMIC_ACQ_REL(NAME, WIDTH) \
static __inline void \
atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{ \
atomic_##NAME##_##WIDTH(p, v); \
__do_dmb(); \
} \
\
static __inline void \
atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{ \
__do_dmb(); \
atomic_##NAME##_##WIDTH(p, v); \
}
static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{
uint32_t tmp = 0, tmp2 = 0;
__asm __volatile("1: ldrex %0, [%2]\n"
"orr %0, %0, %3\n"
"strex %1, %0, [%2]\n"
"cmp %1, #0\n"
"bne 1b\n"
: "=&r" (tmp), "+r" (tmp2)
, "+r" (address), "+r" (setmask) : : "memory");
}
static __inline void
atomic_set_long(volatile u_long *address, u_long setmask)
{
u_long tmp = 0, tmp2 = 0;
__asm __volatile("1: ldrex %0, [%2]\n"
"orr %0, %0, %3\n"
"strex %1, %0, [%2]\n"
"cmp %1, #0\n"
"bne 1b\n"
: "=&r" (tmp), "+r" (tmp2)
, "+r" (address), "+r" (setmask) : : "memory");
}
static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t setmask)
{
uint32_t tmp = 0, tmp2 = 0;
__asm __volatile("1: ldrex %0, [%2]\n"
"bic %0, %0, %3\n"
"strex %1, %0, [%2]\n"
"cmp %1, #0\n"
"bne 1b\n"
: "=&r" (tmp), "+r" (tmp2)
,"+r" (address), "+r" (setmask) : : "memory");
}
static __inline void
atomic_clear_long(volatile u_long *address, u_long setmask)
{
u_long tmp = 0, tmp2 = 0;
__asm __volatile("1: ldrex %0, [%2]\n"
"bic %0, %0, %3\n"
"strex %1, %0, [%2]\n"
"cmp %1, #0\n"
"bne 1b\n"
: "=&r" (tmp), "+r" (tmp2)
,"+r" (address), "+r" (setmask) : : "memory");
}
static __inline u_int32_t
atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
uint32_t ret;
__asm __volatile("1: ldrex %0, [%1]\n"
"cmp %0, %2\n"
"movne %0, #0\n"
"bne 2f\n"
"strex %0, %3, [%1]\n"
"cmp %0, #0\n"
"bne 1b\n"
"moveq %0, #1\n"
"2:"
: "=&r" (ret)
,"+r" (p), "+r" (cmpval), "+r" (newval) : : "memory");
return (ret);
}
static __inline u_long
atomic_cmpset_long(volatile u_long *p, volatile u_long cmpval, volatile u_long newval)
{
u_long ret;
__asm __volatile("1: ldrex %0, [%1]\n"
"cmp %0, %2\n"
"movne %0, #0\n"
"bne 2f\n"
"strex %0, %3, [%1]\n"
"cmp %0, #0\n"
"bne 1b\n"
"moveq %0, #1\n"
"2:"
: "=&r" (ret)
,"+r" (p), "+r" (cmpval), "+r" (newval) : : "memory");
return (ret);
}
static __inline u_int32_t
atomic_cmpset_acq_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
u_int32_t ret = atomic_cmpset_32(p, cmpval, newval);
__do_dmb();
return (ret);
}
static __inline u_long
atomic_cmpset_acq_long(volatile u_long *p, volatile u_long cmpval, volatile u_long newval)
{
u_long ret = atomic_cmpset_long(p, cmpval, newval);
__do_dmb();
return (ret);
}
static __inline u_int32_t
atomic_cmpset_rel_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
__do_dmb();
return (atomic_cmpset_32(p, cmpval, newval));
}
static __inline u_long
atomic_cmpset_rel_long(volatile u_long *p, volatile u_long cmpval, volatile u_long newval)
{
__do_dmb();
return (atomic_cmpset_long(p, cmpval, newval));
}
static __inline void
atomic_add_32(volatile u_int32_t *p, u_int32_t val)
{
uint32_t tmp = 0, tmp2 = 0;
__asm __volatile("1: ldrex %0, [%2]\n"
"add %0, %0, %3\n"
"strex %1, %0, [%2]\n"
"cmp %1, #0\n"
"bne 1b\n"
: "=&r" (tmp), "+r" (tmp2)
,"+r" (p), "+r" (val) : : "memory");
}
static __inline void
atomic_add_long(volatile u_long *p, u_long val)
{
u_long tmp = 0, tmp2 = 0;
__asm __volatile("1: ldrex %0, [%2]\n"
"add %0, %0, %3\n"
"strex %1, %0, [%2]\n"
"cmp %1, #0\n"
"bne 1b\n"
: "=&r" (tmp), "+r" (tmp2)
,"+r" (p), "+r" (val) : : "memory");
}
static __inline void
atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
{
uint32_t tmp = 0, tmp2 = 0;
__asm __volatile("1: ldrex %0, [%2]\n"
"sub %0, %0, %3\n"
"strex %1, %0, [%2]\n"
"cmp %1, #0\n"
"bne 1b\n"
: "=&r" (tmp), "+r" (tmp2)
,"+r" (p), "+r" (val) : : "memory");
}
static __inline void
atomic_subtract_long(volatile u_long *p, u_long val)
{
u_long tmp = 0, tmp2 = 0;
__asm __volatile("1: ldrex %0, [%2]\n"
"sub %0, %0, %3\n"
"strex %1, %0, [%2]\n"
"cmp %1, #0\n"
"bne 1b\n"
: "=&r" (tmp), "+r" (tmp2)
,"+r" (p), "+r" (val) : : "memory");
}
ATOMIC_ACQ_REL(clear, 32)
ATOMIC_ACQ_REL(add, 32)
ATOMIC_ACQ_REL(subtract, 32)
ATOMIC_ACQ_REL(set, 32)
ATOMIC_ACQ_REL_LONG(clear)
ATOMIC_ACQ_REL_LONG(add)
ATOMIC_ACQ_REL_LONG(subtract)
ATOMIC_ACQ_REL_LONG(set)
#undef ATOMIC_ACQ_REL
#undef ATOMIC_ACQ_REL_LONG
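
/* For reference, the instantiations above are mechanical; ATOMIC_ACQ_REL(set, 32),
 * for example, expands (modulo whitespace) to: */

static __inline void
atomic_set_acq_32(__volatile uint32_t *p, uint32_t v)
{
	atomic_set_32(p, v);
	__do_dmb();		/* acquire: barrier after the operation */
}

static __inline void
atomic_set_rel_32(__volatile uint32_t *p, uint32_t v)
{
	__do_dmb();		/* release: barrier before the operation */
	atomic_set_32(p, v);
}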
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t val)
{
uint32_t tmp = 0, tmp2 = 0, ret = 0;
__asm __volatile("1: ldrex %0, [%3]\n"
"add %1, %0, %4\n"
"strex %2, %1, [%3]\n"
"cmp %2, #0\n"
"bne 1b\n"
: "+r" (ret), "=&r" (tmp), "+r" (tmp2)
,"+r" (p), "+r" (val) : : "memory");
return (ret);
}
static __inline uint32_t
atomic_readandclear_32(volatile u_int32_t *p)
{
uint32_t ret, tmp = 0, tmp2 = 0;
__asm __volatile("1: ldrex %0, [%3]\n"
"mov %1, #0\n"
"strex %2, %1, [%3]\n"
"cmp %2, #0\n"
"bne 1b\n"
: "=r" (ret), "=&r" (tmp), "+r" (tmp2)
,"+r" (p) : : "memory");
return (ret);
}
static __inline uint32_t
atomic_load_acq_32(volatile uint32_t *p)
{
uint32_t v;
v = *p;
__do_dmb();
return (v);
}
static __inline void
atomic_store_rel_32(volatile uint32_t *p, uint32_t v)
{
__do_dmb();
*p = v;
}
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long val)
{
u_long tmp = 0, tmp2 = 0, ret = 0;
__asm __volatile("1: ldrex %0, [%3]\n"
"add %1, %0, %4\n"
"strex %2, %1, [%3]\n"
"cmp %2, #0\n"
"bne 1b\n"
: "+r" (ret), "=&r" (tmp), "+r" (tmp2)
,"+r" (p), "+r" (val) : : "memory");
return (ret);
}
static __inline u_long
atomic_readandclear_long(volatile u_long *p)
{
u_long ret, tmp = 0, tmp2 = 0;
__asm __volatile("1: ldrex %0, [%3]\n"
"mov %1, #0\n"
"strex %2, %1, [%3]\n"
"cmp %2, #0\n"
"bne 1b\n"
: "=r" (ret), "=&r" (tmp), "+r" (tmp2)
,"+r" (p) : : "memory");
return (ret);
}
static __inline u_long
atomic_load_acq_long(volatile u_long *p)
{
u_long v;
v = *p;
__do_dmb();
return (v);
}
static __inline void
atomic_store_rel_long(volatile u_long *p, u_long v)
{
__do_dmb();
*p = v;
}
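
/* The acquire/release pairs defined above are exactly what a toy lock needs:
 * acquire on lock, release on unlock. A minimal sketch (illustrative only,
 * not how the kernel's real mutexes are built): */

static __inline void
sketch_spin_lock(volatile uint32_t *lk)
{

	while (atomic_cmpset_acq_32(lk, 0, 1) == 0)
		;	/* spin until the 0 -> 1 transition succeeds */
}

static __inline void
sketch_spin_unlock(volatile uint32_t *lk)
{

	atomic_store_rel_32(lk, 0);
}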
#else /* < armv6 */
#define __with_interrupts_disabled(expr) \
do { \
u_int cpsr_save, tmp; \
@ -287,6 +637,83 @@ atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
#endif /* _KERNEL */
static __inline uint32_t
atomic_readandclear_32(volatile u_int32_t *p)
{
return (__swp(0, p));
}
#define atomic_cmpset_rel_32 atomic_cmpset_32
#define atomic_cmpset_acq_32 atomic_cmpset_32
#define atomic_set_rel_32 atomic_set_32
#define atomic_set_acq_32 atomic_set_32
#define atomic_clear_rel_32 atomic_clear_32
#define atomic_clear_acq_32 atomic_clear_32
#define atomic_add_rel_32 atomic_add_32
#define atomic_add_acq_32 atomic_add_32
#define atomic_subtract_rel_32 atomic_subtract_32
#define atomic_subtract_acq_32 atomic_subtract_32
#define atomic_store_rel_32 atomic_store_32
#define atomic_store_rel_long atomic_store_long
#define atomic_load_acq_32 atomic_load_32
#define atomic_load_acq_long atomic_load_long
#undef __with_interrupts_disabled
static __inline void
atomic_add_long(volatile u_long *p, u_long v)
{
atomic_add_32((volatile uint32_t *)p, v);
}
static __inline void
atomic_clear_long(volatile u_long *p, u_long v)
{
atomic_clear_32((volatile uint32_t *)p, v);
}
static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long old, u_long newe)
{
return (atomic_cmpset_32((volatile uint32_t *)dst, old, newe));
}
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{
return (atomic_fetchadd_32((volatile uint32_t *)p, v));
}
static __inline void
atomic_readandclear_long(volatile u_long *p)
{
atomic_readandclear_32((volatile uint32_t *)p);
}
static __inline void
atomic_set_long(volatile u_long *p, u_long v)
{
atomic_set_32((volatile uint32_t *)p, v);
}
static __inline void
atomic_subtract_long(volatile u_long *p, u_long v)
{
atomic_subtract_32((volatile uint32_t *)p, v);
}
#endif /* Arch >= v6 */
static __inline int
atomic_load_32(volatile uint32_t *v)
{
@ -300,88 +727,57 @@ atomic_store_32(volatile uint32_t *dst, uint32_t src)
*dst = src;
}
static __inline uint32_t
atomic_readandclear_32(volatile u_int32_t *p)
static __inline int
atomic_load_long(volatile u_long *v)
{
return (__swp(0, p));
return (*v);
}
#undef __with_interrupts_disabled
static __inline void
atomic_store_long(volatile u_long *dst, u_long src)
{
*dst = src;
}
#endif /* _LOCORE */
#define atomic_add_long(p, v) \
atomic_add_32((volatile u_int *)(p), (u_int)(v))
#define atomic_add_acq_long atomic_add_long
#define atomic_add_rel_long atomic_add_long
#define atomic_subtract_long(p, v) \
atomic_subtract_32((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_acq_long atomic_subtract_long
#define atomic_subtract_rel_long atomic_subtract_long
#define atomic_clear_long(p, v) \
atomic_clear_32((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_acq_long atomic_clear_long
#define atomic_clear_rel_long atomic_clear_long
#define atomic_set_long(p, v) \
atomic_set_32((volatile u_int *)(p), (u_int)(v))
#define atomic_set_acq_long atomic_set_long
#define atomic_set_rel_long atomic_set_long
#define atomic_cmpset_long(dst, old, new) \
atomic_cmpset_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define atomic_cmpset_acq_long atomic_cmpset_long
#define atomic_cmpset_rel_long atomic_cmpset_long
#define atomic_fetchadd_long(p, v) \
atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))
#define atomic_readandclear_long(p) \
atomic_readandclear_long((volatile u_int *)(p))
#define atomic_load_long(p) \
atomic_load_32((volatile u_int *)(p))
#define atomic_load_acq_long atomic_load_long
#define atomic_store_rel_long(p, v) \
atomic_store_rel_32((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_ptr atomic_clear_32
#define atomic_set_ptr atomic_set_32
#define atomic_cmpset_ptr(dst, old, new) \
atomic_cmpset_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define atomic_cmpset_rel_ptr atomic_cmpset_ptr
#define atomic_cmpset_acq_ptr atomic_cmpset_ptr
#define atomic_cmpset_ptr atomic_cmpset_32
#define atomic_cmpset_rel_ptr atomic_cmpset_rel_32
#define atomic_cmpset_acq_ptr atomic_cmpset_acq_32
#define atomic_store_ptr atomic_store_32
#define atomic_store_rel_ptr atomic_store_ptr
#define atomic_add_int atomic_add_32
#define atomic_add_acq_int atomic_add_int
#define atomic_add_rel_int atomic_add_int
#define atomic_add_acq_int atomic_add_acq_32
#define atomic_add_rel_int atomic_add_rel_32
#define atomic_subtract_int atomic_subtract_32
#define atomic_subtract_acq_int atomic_subtract_int
#define atomic_subtract_rel_int atomic_subtract_int
#define atomic_subtract_acq_int atomic_subtract_acq_32
#define atomic_subtract_rel_int atomic_subtract_rel_32
#define atomic_clear_int atomic_clear_32
#define atomic_clear_acq_int atomic_clear_int
#define atomic_clear_rel_int atomic_clear_int
#define atomic_clear_acq_int atomic_clear_acq_32
#define atomic_clear_rel_int atomic_clear_rel_32
#define atomic_set_int atomic_set_32
#define atomic_set_acq_int atomic_set_int
#define atomic_set_rel_int atomic_set_int
#define atomic_set_acq_int atomic_set_acq_32
#define atomic_set_rel_int atomic_set_rel_32
#define atomic_cmpset_int atomic_cmpset_32
#define atomic_cmpset_acq_int atomic_cmpset_int
#define atomic_cmpset_rel_int atomic_cmpset_int
#define atomic_cmpset_acq_int atomic_cmpset_acq_32
#define atomic_cmpset_rel_int atomic_cmpset_rel_32
#define atomic_fetchadd_int atomic_fetchadd_32
#define atomic_readandclear_int atomic_readandclear_32
#define atomic_load_acq_int atomic_load_32
#define atomic_store_rel_int atomic_store_32
#define atomic_add_acq_32 atomic_add_32
#define atomic_add_rel_32 atomic_add_32
#define atomic_subtract_acq_32 atomic_subtract_32
#define atomic_subtract_rel_32 atomic_subtract_32
#define atomic_clear_acq_32 atomic_clear_32
#define atomic_clear_rel_32 atomic_clear_32
#define atomic_set_acq_32 atomic_set_32
#define atomic_set_rel_32 atomic_set_32
#define atomic_cmpset_acq_32 atomic_cmpset_32
#define atomic_cmpset_rel_32 atomic_cmpset_32
#define atomic_load_acq_32 atomic_load_32
#define atomic_store_rel_32 atomic_store_32
#define atomic_load_acq_int atomic_load_acq_32
#define atomic_store_rel_int atomic_store_rel_32
#endif /* _MACHINE_ATOMIC_H_ */

View File

@ -63,7 +63,9 @@
defined(CPU_XSCALE_PXA2X0) + \
defined(CPU_FA526) + \
defined(CPU_FA626TE) + \
defined(CPU_XSCALE_IXP425))
defined(CPU_XSCALE_IXP425)) + \
defined(CPU_CORTEXA) + \
defined(CPU_MV_PJ4B)
/*
* Step 2: Determine which ARM architecture versions are configured.
@ -86,18 +88,26 @@
#define ARM_ARCH_5 0
#endif
#if defined(CPU_ARM11)
#if !defined(ARM_ARCH_6)
#if defined(CPU_ARM11) || defined(CPU_MV_PJ4B)
#define ARM_ARCH_6 1
#else
#define ARM_ARCH_6 0
#endif
#endif
#define ARM_NARCH (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6)
#if defined(CPU_CORTEXA)
#define ARM_ARCH_7A 1
#else
#define ARM_ARCH_7A 0
#endif
#define ARM_NARCH (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6 + ARM_ARCH_7A)
#if ARM_NARCH == 0 && !defined(KLD_MODULE) && defined(_KERNEL)
#error ARM_NARCH is 0
#endif
#if ARM_ARCH_5 || ARM_ARCH_6
#if ARM_ARCH_5 || ARM_ARCH_6 || ARM_ARCH_7A
/*
* We could support Thumb code on v4T, but the lack of clean interworking
* makes that hard.
@ -113,6 +123,10 @@
*
* ARM_MMU_GENERIC Generic ARM MMU, compatible with ARM6.
*
* ARM_MMU_V6 ARMv6 MMU.
*
* ARM_MMU_V7 ARMv7 MMU.
*
* ARM_MMU_SA1 StrongARM SA-1 MMU. Compatible with generic
* ARM MMU, but has no write-through cache mode.
*
@ -128,13 +142,25 @@
#if (defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM9E) || \
defined(CPU_ARM10) || defined(CPU_ARM11) || defined(CPU_FA526) || \
defined(CPU_ARM10) || defined(CPU_FA526) || \
defined(CPU_FA626TE))
#define ARM_MMU_GENERIC 1
#else
#define ARM_MMU_GENERIC 0
#endif
#if defined(CPU_ARM11) || defined(CPU_MV_PJ4B)
#define ARM_MMU_V6 1
#else
#define ARM_MMU_V6 0
#endif
#if defined(CPU_CORTEXA)
#define ARM_MMU_V7 1
#else
#define ARM_MMU_V7 0
#endif
#if (defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||\
defined(CPU_IXP12X0))
#define ARM_MMU_SA1 1
@ -150,8 +176,8 @@
#define ARM_MMU_XSCALE 0
#endif
#define ARM_NMMUS (ARM_MMU_MEMC + ARM_MMU_GENERIC + \
ARM_MMU_SA1 + ARM_MMU_XSCALE)
#define ARM_NMMUS (ARM_MMU_MEMC + ARM_MMU_GENERIC + ARM_MMU_V6 + \
ARM_MMU_V7 + ARM_MMU_SA1 + ARM_MMU_XSCALE)
#if ARM_NMMUS == 0 && !defined(KLD_MODULE) && defined(_KERNEL)
#error ARM_NMMUS is 0
#endif

View File

@ -176,6 +176,8 @@ extern u_int cputype;
#define cpu_faultstatus() cpufuncs.cf_faultstatus()
#define cpu_faultaddress() cpufuncs.cf_faultaddress()
#ifndef SMP
#define cpu_tlb_flushID() cpufuncs.cf_tlb_flushID()
#define cpu_tlb_flushID_SE(e) cpufuncs.cf_tlb_flushID_SE(e)
#define cpu_tlb_flushI() cpufuncs.cf_tlb_flushI()
@ -183,6 +185,51 @@ extern u_int cputype;
#define cpu_tlb_flushD() cpufuncs.cf_tlb_flushD()
#define cpu_tlb_flushD_SE(e) cpufuncs.cf_tlb_flushD_SE(e)
#else
void tlb_broadcast(int);
#ifdef CPU_CORTEXA
#define TLB_BROADCAST /* No need to explicitly send an IPI */
#else
#define TLB_BROADCAST tlb_broadcast(7)
#endif
#define cpu_tlb_flushID() do { \
cpufuncs.cf_tlb_flushID(); \
TLB_BROADCAST; \
} while(0)
#define cpu_tlb_flushID_SE(e) do { \
cpufuncs.cf_tlb_flushID_SE(e); \
TLB_BROADCAST; \
} while(0)
#define cpu_tlb_flushI() do { \
cpufuncs.cf_tlb_flushI(); \
TLB_BROADCAST; \
} while(0)
#define cpu_tlb_flushI_SE(e) do { \
cpufuncs.cf_tlb_flushI_SE(e); \
TLB_BROADCAST; \
} while(0)
#define cpu_tlb_flushD() do { \
cpufuncs.cf_tlb_flushD(); \
TLB_BROADCAST; \
} while(0)
#define cpu_tlb_flushD_SE(e) do { \
cpufuncs.cf_tlb_flushD_SE(e); \
TLB_BROADCAST; \
} while(0)
#endif
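
/* With the SMP wrappers above, callers keep the familiar single-CPU spelling
 * and get the cross-CPU shootdown for free. A hedged example of a caller
 * (the helper name is hypothetical; PTE_SYNC is assumed to be the existing
 * pmap write-back helper): */

/*
 * Sketch: after rewriting a PTE, flush the stale entry everywhere.
 * cpu_tlb_flushID_SE() now expands to the local flush plus
 * TLB_BROADCAST on SMP kernels.
 */
static void
sketch_update_pte(pt_entry_t *ptep, pt_entry_t npte, vm_offset_t va)
{
	*ptep = npte;
	PTE_SYNC(ptep);			/* assumed existing pmap helper */
	cpu_tlb_flushID_SE(va);
}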
#define cpu_icache_sync_all() cpufuncs.cf_icache_sync_all()
#define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))
@ -222,10 +269,12 @@ int cpufunc_null_fixup (void *);
int early_abort_fixup (void *);
int late_abort_fixup (void *);
u_int cpufunc_id (void);
u_int cpufunc_cpuid (void);
u_int cpufunc_control (u_int clear, u_int bic);
void cpufunc_domains (u_int domains);
u_int cpufunc_faultstatus (void);
u_int cpufunc_faultaddress (void);
u_int cpu_pfr (int);
#ifdef CPU_ARM3
u_int arm3_control (u_int clear, u_int bic);
@ -413,8 +462,9 @@ void sheeva_l2cache_wb_range (vm_offset_t, vm_size_t);
void sheeva_l2cache_wbinv_all (void);
#endif
#ifdef CPU_ARM11
#if defined(CPU_ARM11) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
void arm11_setttb (u_int);
void arm11_sleep (int);
void arm11_tlb_flushID_SE (u_int);
void arm11_tlb_flushI_SE (u_int);
@ -428,6 +478,51 @@ void arm11_tlb_flushD (void);
void arm11_tlb_flushD_SE (u_int va);
void arm11_drain_writebuf (void);
void pj4b_setttb (u_int);
void pj4b_icache_sync_range (vm_offset_t, vm_size_t);
void pj4b_dcache_wbinv_range (vm_offset_t, vm_size_t);
void pj4b_dcache_inv_range (vm_offset_t, vm_size_t);
void pj4b_dcache_wb_range (vm_offset_t, vm_size_t);
void pj4b_idcache_wbinv_range (vm_offset_t, vm_size_t);
void pj4b_drain_readbuf (void);
void pj4b_flush_brnchtgt_all (void);
void pj4b_flush_brnchtgt_va (u_int);
void pj4b_sleep (int);
void armv6_icache_sync_all (void);
void armv6_dcache_wbinv_all (void);
void armv6_idcache_wbinv_all (void);
void armv7_setttb (u_int);
void armv7_tlb_flushID (void);
void armv7_tlb_flushID_SE (u_int);
void armv7_icache_sync_range (vm_offset_t, vm_size_t);
void armv7_idcache_wbinv_range (vm_offset_t, vm_size_t);
void armv7_dcache_wbinv_all (void);
void armv7_idcache_wbinv_all (void);
void armv7_dcache_wbinv_range (vm_offset_t, vm_size_t);
void armv7_dcache_inv_range (vm_offset_t, vm_size_t);
void armv7_dcache_wb_range (vm_offset_t, vm_size_t);
void armv7_cpu_sleep (int);
void armv7_setup (char *string);
void armv7_context_switch (void);
void armv7_drain_writebuf (void);
void armv7_sev (void);
u_int armv7_auxctrl (u_int, u_int);
void pj4bv7_setup (char *string);
void pj4bv6_setup (char *string);
void pj4b_config (void);
int get_core_id (void);
void armadaxp_idcache_wbinv_all (void);
void cortexa_setup (char *);
#endif
#if defined(CPU_ARM9E) || defined (CPU_ARM10)
@ -445,7 +540,7 @@ void armv5_ec_idcache_wbinv_all(void);
void armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t);
#endif
#if defined (CPU_ARM10) || defined (CPU_ARM11)
#if defined (CPU_ARM10)
void armv5_setttb(u_int);
void armv5_icache_sync_all(void);
@ -636,6 +731,10 @@ extern int arm_pcache_unified;
extern int arm_dcache_align;
extern int arm_dcache_align_mask;
extern u_int arm_cache_level;
extern u_int arm_cache_loc;
extern u_int arm_cache_type[14];
#endif /* _KERNEL */
#endif /* _MACHINE_CPUFUNC_H_ */

View File

@ -66,12 +66,19 @@ typedef struct fp_extended_precision fp_reg_t;
* This needs to move and be hidden from userland.
*/
#ifdef ARM_VFP_SUPPORT
struct vfp_state {
u_int64_t reg[32];
u_int32_t fpscr;
};
#else
struct fpe_sp_state {
unsigned int fp_flags;
unsigned int fp_sr;
unsigned int fp_cr;
fp_reg_t fp_registers[16];
};
#endif
/*
* Type for a saved FP context, if we want to translate the context to a

View File

@ -50,6 +50,8 @@
#elif defined(CPU_ARM9) || defined(SOC_MV_KIRKWOOD) || \
defined(CPU_XSCALE_IXP435)
#define NIRQ 64
#elif defined(CPU_CORTEXA)
#define NIRQ 128
#else
#define NIRQ 32
#endif
@ -63,4 +65,7 @@ void arm_setup_irqhandler(const char *, int (*)(void*), void (*)(void*),
void *, int, int, void **);
int arm_remove_irqhandler(int, void *);
extern void (*arm_post_filter)(void *);
void gic_init_secondary(void);
#endif /* _MACHINE_INTR_H */

View File

@ -62,6 +62,7 @@ enum cpu_class {
CPU_CLASS_ARM9EJS,
CPU_CLASS_ARM10E,
CPU_CLASS_ARM10EJ,
CPU_CLASS_CORTEXA,
CPU_CLASS_SA1,
CPU_CLASS_XSCALE,
CPU_CLASS_ARM11J,

View File

@ -56,17 +56,25 @@
#define MACHINE "arm"
#endif
#ifndef MACHINE_ARCH
#ifdef __FreeBSD_ARCH_armv6__
#ifdef __ARMEB__
#define MACHINE_ARCH "armv6eb"
#else
#define MACHINE_ARCH "armv6"
#endif
#else
#ifdef __ARMEB__
#define MACHINE_ARCH "armeb"
#else
#define MACHINE_ARCH "arm"
#endif
#endif
#endif
#define MID_MACHINE MID_ARM6
#if defined(SMP) || defined(KLD_MODULE)
#ifndef MAXCPU
#define MAXCPU 2
#define MAXCPU 4
#endif
#else
#define MAXCPU 1

View File

@ -80,7 +80,12 @@ struct pcb {
#define PCB_NOALIGNFLT 0x00000002
caddr_t pcb_onfault; /* On fault handler */
struct pcb_arm32 un_32;
#ifdef ARM_VFP_SUPPORT
struct vfp_state pcb_vfpstate; /* VFP/NEON state */
u_int pcb_vfpcpu; /* VFP/NEON last cpu */
#else
struct fpe_sp_state pcb_fpstate; /* Floating Point state */
#endif
};
/*

View File

@ -32,6 +32,7 @@
#ifdef _KERNEL
#include <machine/cpuconf.h>
#include <machine/frame.h>
#define ALT_STACK_SIZE 128
@ -40,7 +41,18 @@ struct vmspace;
#endif /* _KERNEL */
#define PCPU_MD_FIELDS
#ifdef ARM_VFP_SUPPORT
#define PCPU_MD_FIELDS \
unsigned int pc_cpu; \
unsigned int pc_vfpsid; \
unsigned int pc_vfpmvfr0; \
unsigned int pc_vfpmvfr1; \
struct thread *pc_vfpcthread; \
struct pmap *pc_curpmap;
#else
#define PCPU_MD_FIELDS
#endif
#ifdef _KERNEL
@ -48,19 +60,50 @@ struct pcb;
struct pcpu;
extern struct pcpu *pcpup;
extern struct pcpu __pcpu;
#if ARM_ARCH_6 || ARM_ARCH_7A
/* or ARM_TP_ADDRESS mark REMOVE ME NOTE */
static inline struct pcpu *
get_pcpu(void)
{
void *pcpu;
#define PCPU_GET(member) (__pcpu.pc_ ## member)
__asm __volatile("mrc p15, 0, %0, c13, c0, 4" : "=r" (pcpu));
return (pcpu);
}
/*
* XXX The implementation of this operation should be made atomic
* with respect to preemption.
*/
#define PCPU_ADD(member, value) (__pcpu.pc_ ## member += (value))
static inline void
set_pcpu(void *pcpu)
{
__asm __volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (pcpu));
}
static inline void *
get_tls(void)
{
void *tls;
__asm __volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tls));
return (tls);
}
static inline void
set_tls(void *tls)
{
__asm __volatile("mcr p15, 0, %0, c13, c0, 3" : : "r" (tls));
}
#else
#define get_pcpu() pcpup
#endif
#define PCPU_GET(member) (get_pcpu()->pc_ ## member)
#define PCPU_ADD(member, value) (get_pcpu()->pc_ ## member += (value))
#define PCPU_INC(member) PCPU_ADD(member, 1)
#define PCPU_PTR(member) (&__pcpu.pc_ ## member)
#define PCPU_SET(member,value) (__pcpu.pc_ ## member = (value))
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
#define PCPU_SET(member,value) (pcpup->pc_ ## member = (value))
void pcpu0_init(void);
#endif /* _KERNEL */
#endif /* !_MACHINE_PCPU_H_ */
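
/* On ARMv6/v7 the per-CPU pointer lives in TPIDRPRW (c13/c0/4) and the user
 * TLS pointer in TPIDRURO (c13/c0/3), so PCPU_GET and friends above become
 * genuinely per-CPU instead of aliasing the single __pcpu. A small sketch of
 * what a consumer now compiles to (illustrative): */

static __inline struct thread *
sketch_curthread(void)
{
	/* One coprocessor read plus one load: get_pcpu()->pc_curthread. */
	return (PCPU_GET(curthread));
}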

sys/arm/include/pl310.h Normal file
View File

@ -0,0 +1,38 @@
/*-
* Copyright (c) 2012 Olivier Houchard. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $FreeBSD$
*/
#ifndef PL310_H_
#define PL310_H_
struct pl310_softc {
struct resource *sc_mem_res;
};
void platform_init_pl310(struct pl310_softc *sc);
#endif /* PL310_H_ */

View File

@ -55,9 +55,30 @@
/*
* Pte related macros
*/
#define PTE_NOCACHE 0
#define PTE_CACHE 1
#define PTE_PAGETABLE 2
#if ARM_ARCH_6 || ARM_ARCH_7A
#ifdef SMP
#define PTE_NOCACHE 2
#else
#define PTE_NOCACHE 1
#endif
#define PTE_CACHE 4
#define PTE_DEVICE 2
#define PTE_PAGETABLE 4
#else
#define PTE_NOCACHE 1
#define PTE_CACHE 2
#define PTE_PAGETABLE 3
#endif
enum mem_type {
STRONG_ORD = 0,
DEVICE_NOSHARE,
DEVICE_SHARE,
NRML_NOCACHE,
NRML_IWT_OWT,
NRML_IWB_OWB,
NRML_IWBA_OWBA
};
#ifndef LOCORE
@ -209,6 +230,7 @@ extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
void pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *);
int pmap_change_attr(vm_offset_t, vm_size_t, int);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void *pmap_kenter_temp(vm_paddr_t pa, int i);
@ -225,6 +247,7 @@ void
pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
int cache);
int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);
int pmap_dmap_iscurrent(pmap_t pmap);
/*
* Definitions for MMU domains
@ -251,18 +274,11 @@ extern int pmap_needs_pte_sync;
* We use these macros since we use different bits on different processor
* models.
*/
#define L1_S_PROT_U (L1_S_AP(AP_U))
#define L1_S_PROT_W (L1_S_AP(AP_W))
#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W)
#define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C)
#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\
L1_S_XSCALE_TEX(TEX_XSCALE_T))
#define L2_L_PROT_U (L2_AP(AP_U))
#define L2_L_PROT_W (L2_AP(AP_W))
#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W)
#define L2_L_CACHE_MASK_generic (L2_B|L2_C)
#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \
L2_XSCALE_L_TEX(TEX_XSCALE_T))
@ -293,6 +309,11 @@ extern int pmap_needs_pte_sync;
/*
* User-visible names for the ones that vary with MMU class.
*/
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
#define L2_AP(x) (L2_AP0(x))
#else
#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))
#endif
#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
@ -334,6 +355,77 @@ extern int pmap_needs_pte_sync;
#define L1_C_PROTO L1_C_PROTO_xscale
#define L2_S_PROTO L2_S_PROTO_xscale
#elif (ARM_MMU_V6 + ARM_MMU_V7) != 0
#define L2_S_PROT_U (L2_AP0(2)) /* user access */
#define L2_S_PROT_R (L2_APX|L2_AP0(1)) /* read access */
#define L2_S_PROT_MASK (L2_S_PROT_U|L2_S_PROT_R)
#define L2_S_WRITABLE(pte) (!((pte) & L2_APX))
#ifndef SMP
#define L1_S_CACHE_MASK (L1_S_TEX_MASK|L1_S_B|L1_S_C)
#define L2_L_CACHE_MASK (L2_L_TEX_MASK|L2_B|L2_C)
#define L2_S_CACHE_MASK (L2_S_TEX_MASK|L2_B|L2_C)
#else
#define L1_S_CACHE_MASK (L1_S_TEX_MASK|L1_S_B|L1_S_C|L1_SHARED)
#define L2_L_CACHE_MASK (L2_L_TEX_MASK|L2_B|L2_C|L2_SHARED)
#define L2_S_CACHE_MASK (L2_S_TEX_MASK|L2_B|L2_C|L2_SHARED)
#endif /* SMP */
#define L1_S_PROTO (L1_TYPE_S)
#define L1_C_PROTO (L1_TYPE_C)
#define L2_S_PROTO (L2_TYPE_S)
#ifndef SMP
#define ARM_L1S_STRONG_ORD (0)
#define ARM_L1S_DEVICE_NOSHARE (L1_S_TEX(2))
#define ARM_L1S_DEVICE_SHARE (L1_S_B)
#define ARM_L1S_NRML_NOCACHE (L1_S_TEX(1))
#define ARM_L1S_NRML_IWT_OWT (L1_S_C)
#define ARM_L1S_NRML_IWB_OWB (L1_S_C|L1_S_B)
#define ARM_L1S_NRML_IWBA_OWBA (L1_S_TEX(1)|L1_S_C|L1_S_B)
#define ARM_L2L_STRONG_ORD (0)
#define ARM_L2L_DEVICE_NOSHARE (L2_L_TEX(2))
#define ARM_L2L_DEVICE_SHARE (L2_B)
#define ARM_L2L_NRML_NOCACHE (L2_L_TEX(1))
#define ARM_L2L_NRML_IWT_OWT (L2_C)
#define ARM_L2L_NRML_IWB_OWB (L2_C|L2_B)
#define ARM_L2L_NRML_IWBA_OWBA (L2_L_TEX(1)|L2_C|L2_B)
#define ARM_L2S_STRONG_ORD (0)
#define ARM_L2S_DEVICE_NOSHARE (L2_S_TEX(2))
#define ARM_L2S_DEVICE_SHARE (L2_B)
#define ARM_L2S_NRML_NOCACHE (L2_S_TEX(1))
#define ARM_L2S_NRML_IWT_OWT (L2_C)
#define ARM_L2S_NRML_IWB_OWB (L2_C|L2_B)
#define ARM_L2S_NRML_IWBA_OWBA (L2_S_TEX(1)|L2_C|L2_B)
#else
#define ARM_L1S_STRONG_ORD (0)
#define ARM_L1S_DEVICE_NOSHARE (L1_S_TEX(2))
#define ARM_L1S_DEVICE_SHARE (L1_S_B)
#define ARM_L1S_NRML_NOCACHE (L1_S_TEX(1)|L1_SHARED)
#define ARM_L1S_NRML_IWT_OWT (L1_S_C|L1_SHARED)
#define ARM_L1S_NRML_IWB_OWB (L1_S_C|L1_S_B|L1_SHARED)
#define ARM_L1S_NRML_IWBA_OWBA (L1_S_TEX(1)|L1_S_C|L1_S_B|L1_SHARED)
#define ARM_L2L_STRONG_ORD (0)
#define ARM_L2L_DEVICE_NOSHARE (L2_L_TEX(2))
#define ARM_L2L_DEVICE_SHARE (L2_B)
#define ARM_L2L_NRML_NOCACHE (L2_L_TEX(1)|L2_SHARED)
#define ARM_L2L_NRML_IWT_OWT (L2_C|L2_SHARED)
#define ARM_L2L_NRML_IWB_OWB (L2_C|L2_B|L2_SHARED)
#define ARM_L2L_NRML_IWBA_OWBA (L2_L_TEX(1)|L2_C|L2_B|L2_SHARED)
#define ARM_L2S_STRONG_ORD (0)
#define ARM_L2S_DEVICE_NOSHARE (L2_S_TEX(2))
#define ARM_L2S_DEVICE_SHARE (L2_B)
#define ARM_L2S_NRML_NOCACHE (L2_S_TEX(1)|L2_SHARED)
#define ARM_L2S_NRML_IWT_OWT (L2_C|L2_SHARED)
#define ARM_L2S_NRML_IWB_OWB (L2_C|L2_B|L2_SHARED)
#define ARM_L2S_NRML_IWBA_OWBA (L2_S_TEX(1)|L2_C|L2_B|L2_SHARED)
#endif /* SMP */
#endif /* ARM_NMMUS > 1 */
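
/* The three parallel ARM_L1S/ARM_L2L/ARM_L2S tables encode the ARMv6/v7
 * TEX+C+B memory types for each mapping granularity, with the SMP variants
 * adding the shareable bit. A hedged illustration of how they combine with
 * the new protection macros (descriptor layout per this header; the helper
 * itself is hypothetical): */

/*
 * Sketch: an L2 small-page descriptor for a kernel-writable, shared
 * device register page at physical address "pa" (page aligned).
 */
static pt_entry_t
sketch_device_pte(vm_paddr_t pa)
{
	return (L2_S_PROTO | pa | ARM_L2S_DEVICE_SHARE |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE));
}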
#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
@ -350,14 +442,41 @@ extern int pmap_needs_pte_sync;
* These macros return various bits based on kernel/user and protection.
* Note that the compiler will usually fold these at compile time.
*/
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
#define L1_S_PROT_U (L1_S_AP(AP_U))
#define L1_S_PROT_W (L1_S_AP(AP_W))
#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W)
#define L1_S_WRITABLE(pd) ((pd) & L1_S_PROT_W)
#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
(((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))
#define L2_L_PROT_U (L2_AP(AP_U))
#define L2_L_PROT_W (L2_AP(AP_W))
#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W)
#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
(((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))
#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
(((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
#else
#define L1_S_PROT_U (L1_S_AP(AP_U))
#define L1_S_PROT_MASK (L1_S_APX|L1_S_AP(0x3))
#define L1_S_WRITABLE(pd) (!((pd) & L1_S_APX))
#define L1_S_PROT(ku, pr) (L1_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L1_S_PROT_U : 0) | \
(((pr) & VM_PROT_WRITE) ? L1_S_APX : 0)))
#define L2_L_PROT_MASK (L2_APX|L2_AP0(0x3))
#define L2_L_PROT(ku, pr) (L2_L_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
(((pr) & VM_PROT_WRITE) ? L2_APX : 0)))
#define L2_S_PROT(ku, pr) (L2_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
(((pr) & VM_PROT_WRITE) ? L2_APX : 0)))
#endif
/*
* Macros to test if a mapping is mappable with an L1 Section mapping
@ -422,7 +541,7 @@ extern pt_entry_t pte_l2_s_proto;
extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);
#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)
#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7 + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)
void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void pmap_zero_page_generic(vm_paddr_t, int, int);
@ -436,6 +555,9 @@ void pmap_pte_init_arm9(void);
#if defined(CPU_ARM10)
void pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
void pmap_pte_init_mmu_v6(void);
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) != 0 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
#if /* ARM_MMU_SA1 == */1

View File

@ -37,18 +37,11 @@
#ifndef _MACHINE_PTE_H_
#define _MACHINE_PTE_H_
#define PDSHIFT 20 /* LOG2(NBPDR) */
#define NBPD (1 << PDSHIFT) /* bytes/page dir */
#define NPTEPD (NBPD / PAGE_SIZE)
#ifndef LOCORE
typedef uint32_t pd_entry_t; /* page directory entry */
typedef uint32_t pt_entry_t; /* page table entry */
#endif
#define PD_MASK 0xfff00000 /* page directory address bits */
#define PT_MASK 0x000ff000 /* page table address bits */
#define PG_FRAME 0xfffff000
/* The PT_SIZE definition is misleading... A page table is only 0x400
@ -73,27 +66,6 @@ typedef uint32_t pt_entry_t; /* page table entry */
#define L2_MASK 0x03 /* Mask for L2 entry type */
#define L2_INVAL 0x00 /* L2 invalid type */
/* PTE construction macros */
#define L2_LPTE(p, a, f) ((p) | PT_AP(a) | L2_LPAGE | (f))
#define L2_SPTE(p, a, f) ((p) | PT_AP(a) | L2_SPAGE | (f))
#define L2_PTE(p, a) L2_SPTE((p), (a), PT_CACHEABLE)
#define L2_PTE_NC(p, a) L2_SPTE((p), (a), PT_B)
#define L2_PTE_NC_NB(p, a) L2_SPTE((p), (a), 0)
#define L1_SECPTE(p, a, f) ((p) | ((a) << AP_SECTION_SHIFT) | (f) \
| L1_SECTION | PT_U)
#define L1_PTE(p) ((p) | 0x00 | L1_PAGE | PT_U)
#define L1_SEC(p, c) L1_SECPTE((p), AP_KRW, (c))
#define L1_SEC_SIZE (1 << PDSHIFT)
#define L2_LPAGE_SIZE (NBPG * 16)
/* Domain types */
#define DOMAIN_FAULT 0x00
#define DOMAIN_CLIENT 0x01
#define DOMAIN_RESERVED 0x02
#define DOMAIN_MANAGER 0x03
/* L1 and L2 address masks */
#define L1_ADDR_MASK 0xfffffc00
#define L2_ADDR_MASK 0xfffff000
@ -205,7 +177,10 @@ typedef uint32_t pt_entry_t; /* page table entry */
#define L1_S_DOM_MASK L1_S_DOM(0xf)
#define L1_S_AP(x) ((x) << 10) /* access permissions */
#define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */
#define L1_SHARED (1 << 16)
#define L1_S_TEX(x) (((x) & 0x7) << 12) /* Type Extension */
#define L1_S_TEX_MASK (0x7 << 12) /* Type Extension */
#define L1_S_APX (1 << 15)
#define L1_SHARED (1 << 16)
#define L1_S_XSCALE_P 0x00000200 /* ECC enable for this section */
#define L1_S_XSCALE_TEX(x) ((x) << 12) /* Type Extension */
@ -256,7 +231,14 @@ typedef uint32_t pt_entry_t; /* page table entry */
#define L2_AP1(x) ((x) << 6) /* access permissions (sp 1) */
#define L2_AP2(x) ((x) << 8) /* access permissions (sp 2) */
#define L2_AP3(x) ((x) << 10) /* access permissions (sp 3) */
#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))
#define L2_SHARED (1 << 10)
#define L2_APX (1 << 9)
#define L2_XN (1 << 0)
#define L2_L_TEX_MASK (0x7 << 12) /* Type Extension */
#define L2_L_TEX(x) (((x) & 0x7) << 12)
#define L2_S_TEX_MASK (0x7 << 6) /* Type Extension */
#define L2_S_TEX(x) (((x) & 0x7) << 6)
#define L2_XSCALE_L_TEX(x) ((x) << 12) /* Type Extension */
#define L2_XSCALE_L_S(x) (1 << 15) /* Shared */

View File

@ -3,4 +3,33 @@
#ifndef _MACHINE_SMP_H_
#define _MACHINE_SMP_H_
#include <sys/_cpuset.h>
#define IPI_AST 0
#define IPI_PREEMPT 2
#define IPI_RENDEZVOUS 3
#define IPI_STOP 4
#define IPI_STOP_HARD 5
#define IPI_HARDCLOCK 6
#define IPI_TLB 7
void init_secondary(int cpu);
void ipi_all_but_self(u_int ipi);
void ipi_cpu(int cpu, u_int ipi);
void ipi_selected(cpuset_t cpus, u_int ipi);
/* PIC interface */
void pic_ipi_send(cpuset_t cpus, u_int ipi);
void pic_ipi_clear(int ipi);
int pic_ipi_get(int arg);
/* Platform interface */
void platform_mp_setmaxid(void);
int platform_mp_probe(void);
void platform_mp_start_ap(void);
void platform_mp_init_secondary(void);
void platform_ipi_send(cpuset_t cpus, u_int ipi);
#endif /* !_MACHINE_SMP_H_ */
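
/* The split between the generic ipi_* senders and the pic_*/platform_* hooks
 * lets each interrupt controller (the new GIC included) plug in underneath.
 * On the receive side, a per-CPU filter drains the PIC; a hedged sketch
 * follows (the dispatch body and the -1 "any IPI" convention are
 * illustrative assumptions, not the committed mp_machdep code): */

static int
sketch_ipi_filter(void *arg)
{
	int ipi;

	while ((ipi = pic_ipi_get(-1)) != -1) {
		switch (ipi) {
		case IPI_TLB:
			cpufuncs.cf_tlb_flushID();	/* local flush only */
			break;
		case IPI_RENDEZVOUS:
			smp_rendezvous_action();
			break;
		/* ... IPI_AST, IPI_STOP, IPI_HARDCLOCK ... */
		}
		pic_ipi_clear(ipi);
	}
	return (FILTER_HANDLED);
}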

View File

@ -50,9 +50,18 @@
 * if ARM_RAS_END moves in relation to ARM_RAS_START (look for occurrences
* of ldr/str rm,[rn, #4]).
*/
/* ARM_TP_ADDRESS is needed for processors that don't support
* the exclusive-access opcodes introduced with ARMv6K. */
/* TODO: #if !defined(_HAVE_ARMv6K_INSTRUCTIONS) */
#if !defined (__ARM_ARCH_7__) && \
!defined (__ARM_ARCH_7A__) && \
!defined (__ARM_ARCH_6K__) && \
!defined (__ARM_ARCH_6ZK__)
#define ARM_TP_ADDRESS (ARM_VECTORS_HIGH + 0x1000)
#define ARM_RAS_START (ARM_TP_ADDRESS + 4)
#define ARM_RAS_END (ARM_TP_ADDRESS + 8)
#endif
#ifndef LOCORE
#ifndef __ASSEMBLER__

sys/arm/include/vfp.h Normal file
View File

@ -0,0 +1,128 @@
/*
* Copyright (c) 2012 Mark Tinguely
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* $FreeBSD$
*/
#ifndef _MACHINE__VFP_H_
#define _MACHINE__VFP_H_
/* fpsid, fpscr, fpexc are defined in the newer gas */
#define VFPSID cr0
#define VFPSCR cr1
#define VMVFR1 cr6
#define VMVFR0 cr7
#define VFPEXC cr8
#define VFPINST cr9 /* vfp 1 and 2 exception instruction */
#define VFPINST2 cr10 /* vfp 2? */
/* VFPSID */
#define VFPSID_IMPLEMENTOR_OFF 24
#define VFPSID_IMPLEMENTOR_MASK (0xff000000)
#define VFPSID_HARDSOFT_IMP (0x00800000)
#define VFPSID_SINGLE_PREC 20 /* version 1 and 2 */
#define VFPSID_SUBVERSION_OFF 16
#define VFPSID_SUBVERSION2_MASK (0x000f0000) /* version 1 and 2 */
#define VFPSID_SUBVERSION3_MASK (0x007f0000) /* version 3 */
#define VFP_ARCH3 (0x00030000)
#define VFPSID_PARTNUMBER_OFF 8
#define VFPSID_PARTNUMBER_MASK (0x0000ff00)
#define VFPSID_VARIANT_OFF 4
#define VFPSID_VARIANT_MASK (0x000000f0)
#define VFPSID_REVISION_MASK 0x0f
/* VFPSCR */
#define VFPSCR_CC_N (0x80000000) /* comparison less than */
#define VFPSCR_CC_Z (0x40000000) /* comparison equal */
#define VFPSCR_CC_C (0x20000000) /* comparison =, >, or unordered */
#define VFPSCR_CC_V (0x10000000) /* comparison unordered */
#define VFPSCR_QC (0x08000000) /* saturation cumulative */
#define VFPSCR_DN (0x02000000) /* default NaN enable */
#define VFPSCR_FZ (0x01000000) /* flush to zero enabled */
#define VFPSCR_RMODE_OFF 22 /* rounding mode offset */
#define VFPSCR_RMODE_MASK (0x00c00000) /* rounding mode mask */
#define VFPSCR_RMODE_RN (0x00000000) /* round nearest */
#define VFPSCR_RMODE_RPI (0x00400000) /* round to plus infinity */
#define VFPSCR_RMODE_RNI (0x00800000) /* round to neg infinity */
#define VFPSCR_RMODE_RM (0x00c00000) /* round to zero */
#define VFPSCR_STRIDE_OFF 20 /* vector stride -1 */
#define VFPSCR_STRIDE_MASK (0x00300000)
#define VFPSCR_LEN_OFF 16 /* vector length -1 */
#define VFPSCR_LEN_MASK (0x00070000)
#define VFPSCR_IDE (0x00008000) /* input subnormal exc enable */
#define VFPSCR_IXE (0x00001000) /* inexact exception enable */
#define VFPSCR_UFE (0x00000800) /* underflow exception enable */
#define VFPSCR_OFE (0x00000400) /* overflow exception enable */
#define VFPSCR_DNZ (0x00000200) /* div by zero exception en */
#define VFPSCR_IOE (0x00000100) /* invalid op exec enable */
#define VFPSCR_IDC (0x00000080) /* input subnormal cumul */
#define VFPSCR_IXC (0x00000010) /* Inexact cumulative flag */
#define VFPSCR_UFC (0x00000008) /* underflow cumulative flag */
#define VFPSCR_OFC (0x00000004) /* overflow cumulative flag */
#define VFPSCR_DZC (0x00000002) /* division by zero flag */
#define VFPSCR_IOC (0x00000001) /* invalid operation cumul */
/* VFPEXC */
#define VFPEXC_EX (0x80000000) /* exception v1 v2 */
#define VFPEXC_EN (0x40000000) /* vfp enable */
/* version 3 registers */
/* VMVFR0 */
#define VMVFR0_RM_OFF 28
#define VMVFR0_RM_MASK (0xf0000000) /* VFP rounding modes */
#define VMVFR0_SV_OFF 24
#define VMVFR0_SV_MASK (0x0f000000) /* VFP short vector supp */
#define VMVFR0_SR_OFF 20
#define VMVFR0_SR (0x00f00000) /* VFP hw sqrt supp */
#define VMVFR0_D_OFF 16
#define VMVFR0_D_MASK (0x000f0000) /* VFP divide supp */
#define VMVFR0_TE_OFF 12
#define VMVFR0_TE_MASK (0x0000f000) /* VFP trap exception supp */
#define VMVFR0_DP_OFF 8
#define VMVFR0_DP_MASK (0x00000f00) /* VFP double prec support */
#define VMVFR0_SP_OFF 4
#define VMVFR0_SP_MASK (0x000000f0) /* VFP single prec support */
#define VMVFR0_RB_MASK (0x0000000f) /* VFP 64 bit media support */
/* VMVFR1 */
#define VMVFR1_SP_OFF 16
#define VMVFR1_SP_MASK (0x000f0000) /* Neon single prec support */
#define VMVFR1_I_OFF 12
#define VMVFR1_I_MASK (0x0000f000) /* Neon integer support */
#define VMVFR1_LS_OFF 8
#define VMVFR1_LS_MASK (0x00000f00) /* Neon ld/st instr support */
#define VMVFR1_DN_OFF 4
#define VMVFR1_DN_MASK (0x000000f0) /* Neon prop NaN support */
#define VMVFR1_FZ_MASK (0x0000000f) /* Neon denormal arith supp */
#define COPROC10 (0x3 << 20)
#define COPROC11 (0x3 << 22)
#endif
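
/* Most of these masks describe four-bit fields, so decoding is a
 * mask-and-shift. For instance, reading the current rounding mode
 * (fmrx is the accessor the new vfp.c uses; treating it as usable
 * here is an assumption, and the helper is hypothetical): */

static int
sketch_vfp_rounding_mode(void)
{
	u_int scr = fmrx(VFPSCR);	/* VFPSCR is cr1, per above */

	/* 0 = nearest, 1 = +inf, 2 = -inf, 3 = toward zero */
	return ((scr & VFPSCR_RMODE_MASK) >> VFPSCR_RMODE_OFF);
}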

View File

@ -116,7 +116,9 @@
#endif
#define VM_MAXUSER_ADDRESS KERNBASE - ARM_KERN_DIRECTMAP
#else /* ARM_USE_SMALL_ALLOC */
#ifndef VM_MAXUSER_ADDRESS
#define VM_MAXUSER_ADDRESS KERNBASE
#endif /* VM_MAXUSER_ADDRESS */
#endif /* ARM_USE_SMALL_ALLOC */
#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS

View File

@ -118,9 +118,6 @@ extern u_int undefined_handler_address;
struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];
struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;
/* Physical and virtual addresses for some global pages */
vm_paddr_t phys_avail[10];
@ -241,8 +238,7 @@ initarm(struct arm_boot_params *abp)
set_cpufuncs();
cpufuncs.cf_sleep = s3c24x0_sleep;
pcpu_init(pcpup, 0, sizeof(struct pcpu));
PCPU_SET(curthread, &thread0);
pcpu0_init();
/* Do basic tuning, hz etc */
init_param1();

View File

@ -7,4 +7,5 @@ options KERNPHYSADDR=0x30000000
options KERNVIRTADDR=0xc0000000
options PHYSADDR=0x30000000
options STARTUP_PAGETABLE_ADDR=0x30800000
options NO_EVENTTIMERS

View File

@ -4,3 +4,4 @@ files "../s3c2xx0/files.s3c2xx0"
cpu CPU_ARM9
makeoptions CONF_CFLAGS=-mcpu=arm920t
options NO_EVENTTIMERS

View File

@ -123,9 +123,6 @@ extern vm_offset_t sa1110_uart_vaddr;
extern vm_offset_t sa1_cache_clean_addr;
struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;
#ifndef MD_ROOT_SIZE
#define MD_ROOT_SIZE 65535
#endif
@ -197,7 +194,6 @@ cpu_reset()
void *
initarm(struct arm_boot_params *abp)
{
struct pcpu *pc;
struct pv_addr kernel_l1pt;
struct pv_addr md_addr;
struct pv_addr md_bla;
@@ -215,9 +211,7 @@ initarm(struct arm_boot_params *abp)
cninit();
set_cpufuncs();
physmem = memsize / PAGE_SIZE;
pc = &__pcpu;
pcpu_init(pc, 0, sizeof(struct pcpu));
PCPU_SET(curthread, &thread0);
pcpu0_init();
/* Do basic tuning, hz etc */
init_param1();

View File

@@ -5,3 +5,4 @@ cpu CPU_SA1100
cpu CPU_SA1110
makeoptions KERNPHYSADDR=0xc0000000
makeoptions KERNVIRTADDR=0xc0000000
options NO_EVENTTIMERS

View File

@@ -115,9 +115,6 @@ extern u_int undefined_handler_address;
struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];
struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;
/* Physical and virtual addresses for some global pages */
vm_paddr_t phys_avail[10];

View File

@@ -115,9 +115,6 @@ extern u_int undefined_handler_address;
struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];
struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;
/* Physical and virtual addresses for some global pages */
vm_paddr_t phys_avail[10];

View File

@@ -118,9 +118,6 @@ extern u_int undefined_handler_address;
struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];
struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;
/* Physical and virtual addresses for some global pages */
vm_paddr_t phys_avail[10];

View File

@@ -119,9 +119,6 @@ extern u_int undefined_handler_address;
struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];
struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;
/* Physical and virtual addresses for some global pages */
vm_paddr_t phys_avail[10];

View File

@@ -115,9 +115,6 @@ extern u_int undefined_handler_address;
struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];
struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;
/* Physical and virtual addresses for some global pages */
vm_paddr_t phys_avail[PXA2X0_SDRAM_BANKS * 2 + 4];

View File

@@ -1,3 +1,4 @@
# $FreeBSD$
# machine arm armeb
options ARM_CACHE_LOCK_ENABLE
options NO_EVENTTIMERS

View File

@@ -75,7 +75,9 @@ FILES_CPU_FUNC = $S/$M/$M/cpufunc_asm_arm7tdmi.S \
$S/$M/$M/cpufunc_asm_sa1.S $S/$M/$M/cpufunc_asm_arm10.S \
$S/$M/$M/cpufunc_asm_xscale.S $S/$M/$M/cpufunc_asm.S \
$S/$M/$M/cpufunc_asm_xscale_c3.S $S/$M/$M/cpufunc_asm_armv5_ec.S \
$S/$M/$M/cpufunc_asm_sheeva.S $S/$M/$M/cpufunc_asm_fa526.S
$S/$M/$M/cpufunc_asm_fa526.S $S/$M/$M/cpufunc_asm_sheeva.S \
$S/$M/$M/cpufunc_asm_pj4b.S $S/$M/$M/cpufunc_asm_armv7.S
KERNEL_EXTRA=trampoline
KERNEL_EXTRA_INSTALL=kernel.gz.tramp
trampoline: ${KERNEL_KO}.tramp

View File

@@ -7,7 +7,8 @@ arm/arm/bcopyinout.S standard
arm/arm/blockio.S standard
arm/arm/bootconfig.c standard
arm/arm/bus_space_asm_generic.S standard
arm/arm/busdma_machdep.c standard
arm/arm/busdma_machdep.c optional cpu_arm9 | cpu_arm9e | cpu_fa526 | cpu_sa1100 | cpu_sa1110 | cpu_xscale_80219 | cpu_xscale_80321 | cpu_xscale_81342 | cpu_xscale_ixp425 | cpu_xscale_ixp435 | cpu_xscale_pxa2x0
arm/arm/busdma_machdep-v6.c optional cpu_arm11 | cpu_cortexa | cpu_mv_pj4b
arm/arm/copystr.S standard
arm/arm/cpufunc.c standard
arm/arm/cpufunc_asm.S standard
@@ -31,8 +32,11 @@ arm/arm/locore.S standard no-obj
arm/arm/machdep.c standard
arm/arm/mem.c optional mem
arm/arm/minidump_machdep.c optional mem
arm/arm/mp_machdep.c optional smp
arm/arm/nexus.c standard
arm/arm/pmap.c standard
arm/arm/pl310.c optional pl310
arm/arm/pmap.c optional cpu_arm9 | cpu_arm9e | cpu_fa526 | cpu_sa1100 | cpu_sa1110 | cpu_xscale_80219 | cpu_xscale_80321 | cpu_xscale_81342 | cpu_xscale_ixp425 | cpu_xscale_ixp435 | cpu_xscale_pxa2x0
arm/arm/pmap-v6.c optional cpu_arm11 | cpu_cortexa | cpu_mv_pj4b
arm/arm/setcpsr.S standard
arm/arm/setstack.s standard
arm/arm/stack_machdep.c optional ddb | stack
@@ -44,6 +48,7 @@ arm/arm/uio_machdep.c standard
arm/arm/undefined.c standard
arm/arm/vectors.S standard
arm/arm/vm_machdep.c standard
arm/arm/vfp.c optional vfp
arm/fpe-arm/armfpe_glue.S optional armfpe
arm/fpe-arm/armfpe_init.c optional armfpe
arm/fpe-arm/armfpe.S optional armfpe

View File

@@ -3,13 +3,21 @@ ARM9_CACHE_WRITE_THROUGH opt_global.h
ARM_CACHE_LOCK_ENABLE opt_global.h
ARMFPE opt_global.h
ARM_KERN_DIRECTMAP opt_vm.h
ARM_L2_PIPT opt_global.h
ARM_MANY_BOARD opt_global.h
ARM_USE_SMALL_ALLOC opt_global.h
ARM_VFP_SUPPORT opt_global.h
ARM_WANT_TP_ADDRESS opt_global.h
COUNTS_PER_SEC opt_timer.h
CPU_SA1100 opt_global.h
CPU_SA1110 opt_global.h
CPU_ARM9 opt_global.h
CPU_ARM9E opt_global.h
CPU_ARM11 opt_global.h
CPU_CORTEXA opt_global.h
CPU_FA526 opt_global.h
CPU_FA626TE opt_global.h
CPU_MV_PJ4B opt_global.h
CPU_SA1100 opt_global.h
CPU_SA1110 opt_global.h
CPU_XSCALE_80219 opt_global.h
CPU_XSCALE_80321 opt_global.h
CPU_XSCALE_81342 opt_global.h
@@ -17,24 +25,34 @@ CPU_XSCALE_IXP425 opt_global.h
CPU_XSCALE_IXP435 opt_global.h
CPU_XSCALE_PXA2X0 opt_global.h
FLASHADDR opt_global.h
IPI_IRQ_START opt_smp.h
IPI_IRQ_END opt_smp.h
FREEBSD_BOOT_LOADER opt_global.h
IXP4XX_FLASH_SIZE opt_global.h
KERNPHYSADDR opt_global.h
KERNVIRTADDR opt_global.h
LINUX_BOOT_ABI opt_global.h
LOADERRAMADDR opt_global.h
NO_EVENTTIMERS opt_timer.h
PHYSADDR opt_global.h
QEMU_WORKAROUNDS opt_global.h
SOC_MV_ARMADAXP opt_global.h
SOC_MV_DISCOVERY opt_global.h
SOC_MV_DOVE opt_global.h
SOC_MV_FREY opt_global.h
SOC_MV_KIRKWOOD opt_global.h
SOC_MV_LOKIPLUS opt_global.h
SOC_MV_ORION opt_global.h
SOC_OMAP3 opt_global.h
SOC_OMAP4 opt_global.h
SOC_TI_AM335X opt_global.h
SOC_TEGRA2 opt_global.h
STARTUP_PAGETABLE_ADDR opt_global.h
XSCALE_CACHE_READ_WRITE_ALLOCATE opt_global.h
XSACLE_DISABLE_CCNT opt_timer.h
VERBOSE_INIT_ARM opt_global.h
VM_MAXUSER_ADDRESS opt_global.h
AT91_ATE_USE_RMII opt_at91.h
AT91_MCI_HAS_4WIRE opt_at91.h
AT91_MCI_SLOT_B opt_at91.h
AT91C_MAIN_CLOCK opt_at91.h
CPU_FA526 opt_global.h
CPU_FA626TE opt_global.h
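
Each option above is routed to a specific generated header (opt_global.h, opt_timer.h, opt_smp.h, ...), so only files that include that header see the define. As a hedged example of the consumer side (the file and function here are invented for illustration, not from this commit), a timer driver honoring NO_EVENTTIMERS might look like:

/*
 * Illustrative only: consuming an option routed to opt_timer.h.
 */
#include "opt_timer.h"

static void
timer_attach(void)
{
#ifndef NO_EVENTTIMERS
	/* register event-timer backends; skipped when the option is set */
#endif
}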