Remove trailing whitespace from sys/arm/arm

Andrew Turner 2015-05-24 12:20:11 +00:00
parent ad25ff4509
commit 66fb2f18cd
37 changed files with 279 additions and 283 deletions

View File

@ -69,7 +69,7 @@ __FBSDID("$FreeBSD$");
#define SAVE_REGS stmfd sp!, {r4-r11}
#define RESTORE_REGS ldmfd sp!, {r4-r11}
#if defined(_ARM_ARCH_5E)
#define HELLOCPP #
#define PREFETCH(rx,o) pld [ rx , HELLOCPP (o) ]
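The HELLOCPP trick above works because a '#' produced by expanding an object-like macro is emitted as a literal token instead of being parsed as the stringize operator, which lets PREFETCH() generate the '#' immediate prefix that ARM assembly syntax requires. A minimal sketch of the expansion (illustrative only, not part of this diff):

    /* PREFETCH(r4, 32) reaches the assembler, after cpp, as: */
    pld [ r4 , # (32) ]    /* the '#' survives because it came from HELLOCPP */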
@ -88,7 +88,7 @@ __FBSDID("$FreeBSD$");
* r4-r11 are scratch
*/
ENTRY(copyin)
/* Quick exit if length is zero */
/* Quick exit if length is zero */
teq r2, #0
moveq r0, #0
RETeq
@ -326,7 +326,7 @@ END(copyin)
*/
ENTRY(copyout)
/* Quick exit if length is zero */
/* Quick exit if length is zero */
teq r2, #0
moveq r0, #0
RETeq

View File

@ -85,7 +85,7 @@ ENTRY(copyin)
ldmfd sp!, {r0-r2, r4, lr}
moveq r0, #0
RETeq
.Lnormal:
stmfd sp!, {r10-r11, lr}
@ -527,8 +527,8 @@ ENTRY(copyout)
ldmfd sp!, {r0-r2, r4, lr}
moveq r0, #0
RETeq
.Lnormale:
.Lnormale:
stmfd sp!, {r10-r11, lr}
GET_PCB(r10)
@ -584,7 +584,7 @@ ENTRY(copyout)
ldrne ip, [r0], #0x04
subne r2, r2, #0x04
strtne ip, [r1], #0x04
stmfd sp!, {r4-r9} /* Free up some registers */
mov r3, #-1 /* Signal restore r4-r9 */

View File

@ -43,7 +43,7 @@
*
* Created : 08/10/94
* Modified : 22/01/99 -- R.Earnshaw
* Faster, and small tweaks for StrongARM
* Faster, and small tweaks for StrongARM
*/
#include <machine/asm.h>
@ -251,7 +251,7 @@ ENTRY(outsw)
str r3, [r0]
str ip, [r0]
/* mov ip, r3, lsl #16
* orr ip, ip, ip, lsr #16
* str ip, [r0]
@ -358,7 +358,7 @@ ENTRY(outsw16)
eor r3, r3, r4, lsl #16 /* r3 = (A^B^A)(B) = (B)(B) */
str r3, [r0]
str r4, [r0]
/* mov r3, r4, lsl #16
* orr r3, r3, r3, lsr #16
* str r3, [r0]

View File

@ -116,10 +116,10 @@ static struct bus_space arm_base_bus_space __aligned(CACHE_LINE_SIZE) = {
.bs_c_8 = BS_UNIMPLEMENTED,
/* read stream (single) */
.bs_r_1_s = NULL, /* Use inline code in bus.h */
.bs_r_2_s = NULL, /* Use inline code in bus.h */
.bs_r_4_s = NULL, /* Use inline code in bus.h */
.bs_r_8_s = NULL, /* Use inline code in bus.h */
.bs_r_1_s = NULL, /* Use inline code in bus.h */
.bs_r_2_s = NULL, /* Use inline code in bus.h */
.bs_r_4_s = NULL, /* Use inline code in bus.h */
.bs_r_8_s = NULL, /* Use inline code in bus.h */
/* read multiple stream */
.bs_rm_1_s = generic_bs_rm_1,
@ -134,10 +134,10 @@ static struct bus_space arm_base_bus_space __aligned(CACHE_LINE_SIZE) = {
.bs_rr_8_s = BS_UNIMPLEMENTED,
/* write stream (single) */
.bs_w_1_s = NULL, /* Use inline code in bus.h */
.bs_w_2_s = NULL, /* Use inline code in bus.h */
.bs_w_4_s = NULL, /* Use inline code in bus.h */
.bs_w_8_s = NULL, /* Use inline code in bus.h */
.bs_w_1_s = NULL, /* Use inline code in bus.h */
.bs_w_2_s = NULL, /* Use inline code in bus.h */
.bs_w_4_s = NULL, /* Use inline code in bus.h */
.bs_w_8_s = NULL, /* Use inline code in bus.h */
/* write multiple stream */
.bs_wm_1_s = generic_bs_wm_1,

View File

@ -160,7 +160,7 @@ SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0,
"Number of active maps for bus_dmamem_alloc buffers");
SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0,
"Number of active maps with BUS_DMA_COHERENT flag set");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD,
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD,
&maploads_total, "Number of load operations performed");
SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD,
&maploads_bounced, "Number of load operations that used bounce buffers");
@ -230,14 +230,14 @@ busdma_init(void *dummy)
uma_flags = 0;
/* Create a cache of buffers in standard (cacheable) memory. */
standard_allocator = busdma_bufalloc_create("buffer",
standard_allocator = busdma_bufalloc_create("buffer",
arm_dcache_align, /* minimum_alignment */
NULL, /* uma_alloc func */
NULL, /* uma_alloc func */
NULL, /* uma_free func */
uma_flags); /* uma_zcreate_flags */
#ifdef INVARIANTS
/*
/*
* Force UMA zone to allocate service structures like
* slabs using own allocator. uma_debug code performs
* atomic ops on uma_slab_t fields and safety of this
@ -251,8 +251,8 @@ busdma_init(void *dummy)
*/
coherent_allocator = busdma_bufalloc_create("coherent",
arm_dcache_align, /* minimum_alignment */
busdma_bufalloc_alloc_uncacheable,
busdma_bufalloc_free_uncacheable,
busdma_bufalloc_alloc_uncacheable,
busdma_bufalloc_free_uncacheable,
uma_flags); /* uma_zcreate_flags */
}
@ -343,7 +343,7 @@ cacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size)
* address spaces.
*/
static __inline int
might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr,
might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr,
bus_size_t size)
{
@ -364,7 +364,7 @@ might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr,
* the DMA needs to bounce, otherwise any DMA within the zone bounces.
*/
static int
must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
bus_size_t size)
{
@ -387,11 +387,11 @@ must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
*/
while (dmat != NULL && exclusion_bounce(dmat)) {
if ((paddr >= dmat->lowaddr && paddr <= dmat->highaddr) &&
(dmat->filter == NULL ||
(dmat->filter == NULL ||
dmat->filter(dmat->filterarg, paddr) != 0))
return (1);
dmat = dmat->parent;
}
}
return (0);
}
@ -554,7 +554,7 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
* number of pages in a transfer.
*/
maxsize = roundup2(maxsize, PAGE_SIZE) + PAGE_SIZE;
if ((error = alloc_bounce_zone(newtag)) != 0) {
free(newtag, M_DEVBUF);
return (error);
@ -630,7 +630,7 @@ static int allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp)
struct bounce_zone *bz;
int maxpages;
int error;
if (dmat->bounce_zone == NULL)
if ((error = alloc_bounce_zone(dmat)) != 0)
return (error);
@ -651,13 +651,13 @@ static int allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp)
if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
(bz->map_count > 0 && bz->total_bpages < maxpages)) {
int pages;
pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1;
pages = MIN(maxpages - bz->total_bpages, pages);
pages = MAX(pages, 2);
if (alloc_bounce_pages(dmat, pages) < pages)
return (ENOMEM);
if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0)
dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
}
@ -676,7 +676,7 @@ allocate_map(bus_dma_tag_t dmat, int mflags)
* variable-sized array of sync_list structures. Following that
* we allocate enough extra space to hold the array of bus_dma_segments.
*/
KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS,
KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS,
("cannot allocate %u dma segments (max is %u)",
dmat->nsegments, MAX_DMA_SEGMENTS));
segsize = sizeof(struct bus_dma_segment) * dmat->nsegments;
@ -931,7 +931,7 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
else
paddr = pmap_extract(map->pmap, vaddr);
if (must_bounce(dmat, map, paddr,
min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr &
min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr &
PAGE_MASK)))) != 0) {
map->pagesneeded++;
}
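The min() expression above clamps each must_bounce() check to the remainder of the current page, so a single check never spans a page boundary. A worked example with illustrative numbers (PAGE_SIZE = 4096):

    /*
     * vaddr = 0xc0000f80, vendaddr = 0xc0001180 (0x200 bytes total):
     *   min(0x200, 0x1000 - 0xf80) = 0x80   -> tail of the first page
     *   min(0x180, 0x1000 - 0x000) = 0x180  -> remainder, in the second page
     */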
@ -1267,7 +1267,7 @@ _bus_dmamap_fix_user(vm_offset_t buf, bus_size_t len,
bus_addr_t curaddr;
vm_offset_t va;
/*
/*
* each synclist entry is contained within a single page.
* this would be needed if BUS_DMASYNC_POSTxxxx was implemented
*/
@ -1339,7 +1339,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
cpu_dcache_wb_range((vm_offset_t)bpage->vaddr,
bpage->datacount);
l2cache_wb_range((vm_offset_t)bpage->vaddr,
(vm_offset_t)bpage->busaddr,
(vm_offset_t)bpage->busaddr,
bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
@ -1387,10 +1387,10 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
startv = bpage->vaddr &~ arm_dcache_align_mask;
startp = bpage->busaddr &~ arm_dcache_align_mask;
len = bpage->datacount;
if (startv != bpage->vaddr)
len += bpage->vaddr & arm_dcache_align_mask;
if (len & arm_dcache_align_mask)
if (len & arm_dcache_align_mask)
len = (len -
(len & arm_dcache_align_mask)) +
arm_dcache_align;
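The adjustment above widens the bounce-page region to whole cache lines before the cache maintenance that follows. A worked example, assuming a 32-byte arm_dcache_align (mask 0x1f); the addresses are illustrative:

    /*
     * bpage->vaddr = 0xc0001234, bpage->datacount = 0x64:
     *   startv = 0xc0001234 & ~0x1f           = 0xc0001220
     *   len    = 0x64 + (0xc0001234 & 0x1f)   = 0x78
     *   len    = (0x78 - (0x78 & 0x1f)) + 32  = 0x80   (four full lines)
     */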
@ -1473,7 +1473,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
sl->busaddr, 1);
}
cpu_dcache_inv_range(sl->vaddr, sl->datacount);
l2cache_inv_range(sl->vaddr, sl->busaddr,
l2cache_inv_range(sl->vaddr, sl->busaddr,
sl->datacount);
sl++;
}
@ -1485,7 +1485,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
case BUS_DMASYNC_POSTREAD:
case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
while (sl != end) {
l2cache_inv_range(sl->vaddr, sl->busaddr,
l2cache_inv_range(sl->vaddr, sl->busaddr,
sl->datacount);
cpu_dcache_inv_range(sl->vaddr, sl->datacount);
sl++;

View File

@ -237,7 +237,7 @@ dmamap_ctor(void *mem, int size, void *arg, int flags)
* This is the dtor function passed to uma_zcreate() for the pool of dma maps.
* It may need platform-specific changes if this code is copied.
*/
static void
static void
dmamap_dtor(void *mem, int size, void *arg)
{
bus_dmamap_t map;
@ -256,9 +256,9 @@ busdma_init(void *dummy)
dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
/* Create a cache of buffers in standard (cacheable) memory. */
standard_allocator = busdma_bufalloc_create("buffer",
standard_allocator = busdma_bufalloc_create("buffer",
arm_dcache_align, /* minimum_alignment */
NULL, /* uma_alloc func */
NULL, /* uma_alloc func */
NULL, /* uma_free func */
0); /* uma_zcreate_flags */
@ -268,8 +268,8 @@ busdma_init(void *dummy)
*/
coherent_allocator = busdma_bufalloc_create("coherent",
arm_dcache_align, /* minimum_alignment */
busdma_bufalloc_alloc_uncacheable,
busdma_bufalloc_free_uncacheable,
busdma_bufalloc_alloc_uncacheable,
busdma_bufalloc_free_uncacheable,
0); /* uma_zcreate_flags */
}
@ -308,7 +308,7 @@ run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
|| (*dmat->filter)(dmat->filterarg, paddr) != 0))
retval = 1;
dmat = dmat->parent;
dmat = dmat->parent;
} while (retval == 0 && dmat != NULL);
return (retval);
}
@ -531,13 +531,13 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
#endif
if (dmat != NULL) {
if (dmat->map_count != 0)
return (EBUSY);
while (dmat != NULL) {
bus_dma_tag_t parent;
parent = dmat->parent;
atomic_subtract_int(&dmat->ref_count, 1);
if (dmat->ref_count == 0) {
@ -588,7 +588,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
* now, because we can't sleep for resources at map load time.
*/
if (dmat->segments == NULL) {
dmat->segments = malloc(dmat->nsegments *
dmat->segments = malloc(dmat->nsegments *
sizeof(*dmat->segments), M_DEVBUF, M_NOWAIT);
if (dmat->segments == NULL) {
free(slist, M_DEVBUF);
@ -701,7 +701,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
* now, because we can't sleep for resources at map load time.
*/
if (dmat->segments == NULL)
dmat->segments = malloc(dmat->nsegments *
dmat->segments = malloc(dmat->nsegments *
sizeof(*dmat->segments), M_DEVBUF, mflags);
slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
@ -918,7 +918,7 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
* memory address to an address in the DMA window.
*/
curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
}
seg = *segp;
@ -1206,7 +1206,7 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
STAILQ_FOREACH(bpage, &map->bpages, links) {
if (op & BUS_DMASYNC_PREWRITE) {
if (bpage->datavaddr != 0)
bcopy((void *)bpage->datavaddr,
bcopy((void *)bpage->datavaddr,
(void *)bpage->vaddr, bpage->datacount);
else
physcopyout(bpage->dataaddr,

View File

@ -38,7 +38,6 @@
* Created : 16/05/95
*/
#include "assym.s"
#include <machine/asm.h>
#include <machine/armreg.h>

View File

@ -33,10 +33,10 @@
#include <machine/armreg.h>
#include <machine/sysreg.h>
/*
/*
* Define cache functions used by startup code, which counts on the fact that
* only r0-r3,r12 (ip) are modified and no stack space is used. These functions
* must be called with interrupts disabled. Moreover, these work only with
* must be called with interrupts disabled. Moreover, these work only with
* caches integrated to CPU (accessible via CP15); systems with an external L2
* cache controller such as a PL310 need separate calls to that device driver
* to affect L2 caches. This is not a factor during early kernel startup, as

View File

@ -361,7 +361,7 @@ struct cpu_functions pj4bv7_cpufuncs = {
struct cpu_functions xscale_cpufuncs = {
/* CPU functions */
cpufunc_id, /* id */
xscale_cpwait, /* cpwait */
@ -426,7 +426,7 @@ struct cpu_functions xscale_cpufuncs = {
#ifdef CPU_XSCALE_81342
struct cpu_functions xscalec3_cpufuncs = {
/* CPU functions */
cpufunc_id, /* id */
xscale_cpwait, /* cpwait */
@ -553,63 +553,63 @@ struct cpu_functions fa526_cpufuncs = {
#if defined(CPU_ARM1176)
struct cpu_functions arm1176_cpufuncs = {
/* CPU functions */
cpufunc_id, /* id */
cpufunc_nullop, /* cpwait */
/* MMU functions */
cpufunc_control, /* control */
cpufunc_domains, /* Domain */
arm11x6_setttb, /* Setttb */
cpufunc_faultstatus, /* Faultstatus */
cpufunc_faultaddress, /* Faultaddress */
/* TLB functions */
arm11_tlb_flushID, /* tlb_flushID */
arm11_tlb_flushID_SE, /* tlb_flushID_SE */
arm11_tlb_flushI, /* tlb_flushI */
arm11_tlb_flushI_SE, /* tlb_flushI_SE */
arm11_tlb_flushD, /* tlb_flushD */
arm11_tlb_flushD_SE, /* tlb_flushD_SE */
/* Cache operations */
arm11x6_icache_sync_all, /* icache_sync_all */
arm11x6_icache_sync_range, /* icache_sync_range */
arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */
armv6_dcache_wbinv_range, /* dcache_wbinv_range */
armv6_dcache_inv_range, /* dcache_inv_range */
armv6_dcache_wb_range, /* dcache_wb_range */
armv6_idcache_inv_all, /* idcache_inv_all */
arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */
arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */
(void *)cpufunc_nullop, /* l2cache_wbinv_all */
(void *)cpufunc_nullop, /* l2cache_wbinv_range */
(void *)cpufunc_nullop, /* l2cache_inv_range */
(void *)cpufunc_nullop, /* l2cache_wb_range */
(void *)cpufunc_nullop, /* l2cache_drain_writebuf */
/* Other functions */
arm11x6_flush_prefetchbuf, /* flush_prefetchbuf */
arm11_drain_writebuf, /* drain_writebuf */
cpufunc_nullop, /* flush_brnchtgt_C */
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
arm11x6_sleep, /* sleep */
/* Soft functions */
cpufunc_null_fixup, /* dataabt_fixup */
cpufunc_null_fixup, /* prefetchabt_fixup */
arm11_context_switch, /* context_switch */
arm11x6_setup /* cpu setup */
};
#endif /*CPU_ARM1176 */
@ -617,46 +617,46 @@ struct cpu_functions arm1176_cpufuncs = {
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
struct cpu_functions cortexa_cpufuncs = {
/* CPU functions */
cpufunc_id, /* id */
cpufunc_nullop, /* cpwait */
/* MMU functions */
cpufunc_control, /* control */
cpufunc_domains, /* Domain */
armv7_setttb, /* Setttb */
cpufunc_faultstatus, /* Faultstatus */
cpufunc_faultaddress, /* Faultaddress */
/*
/*
* TLB functions. ARMv7 does all TLB ops based on a unified TLB model
* whether the hardware implements separate I+D or not, so we use the
* same 'ID' functions for all 3 variations.
*/
armv7_tlb_flushID, /* tlb_flushID */
armv7_tlb_flushID_SE, /* tlb_flushID_SE */
armv7_tlb_flushID, /* tlb_flushI */
armv7_tlb_flushID_SE, /* tlb_flushI_SE */
armv7_tlb_flushID, /* tlb_flushD */
armv7_tlb_flushID_SE, /* tlb_flushD_SE */
/* Cache operations */
armv7_icache_sync_all, /* icache_sync_all */
armv7_icache_sync_range, /* icache_sync_range */
armv7_dcache_wbinv_all, /* dcache_wbinv_all */
armv7_dcache_wbinv_range, /* dcache_wbinv_range */
armv7_dcache_inv_range, /* dcache_inv_range */
armv7_dcache_wb_range, /* dcache_wb_range */
armv7_idcache_inv_all, /* idcache_inv_all */
armv7_idcache_wbinv_all, /* idcache_wbinv_all */
armv7_idcache_wbinv_range, /* idcache_wbinv_range */
/*
/*
* Note: For CPUs using the PL310 the L2 ops are filled in when the
* L2 cache controller is actually enabled.
*/
@ -665,23 +665,23 @@ struct cpu_functions cortexa_cpufuncs = {
(void *)cpufunc_nullop, /* l2cache_inv_range */
(void *)cpufunc_nullop, /* l2cache_wb_range */
(void *)cpufunc_nullop, /* l2cache_drain_writebuf */
/* Other functions */
cpufunc_nullop, /* flush_prefetchbuf */
armv7_drain_writebuf, /* drain_writebuf */
cpufunc_nullop, /* flush_brnchtgt_C */
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
armv7_cpu_sleep, /* sleep */
/* Soft functions */
cpufunc_null_fixup, /* dataabt_fixup */
cpufunc_null_fixup, /* prefetchabt_fixup */
armv7_context_switch, /* context_switch */
cortexa_setup /* cpu setup */
};
#endif /* CPU_CORTEXA */
@ -758,7 +758,7 @@ get_cachetype_cp15()
__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
: "=r" (csize));
arm_cache_type[sel] = csize;
arm_dcache_align = 1 <<
arm_dcache_align = 1 <<
(CPUV7_CT_xSIZE_LEN(csize) + 4);
arm_dcache_align_mask = arm_dcache_align - 1;
}
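The shift above converts the ARMv7 CCSIDR line-size field into a byte count: the field encodes log2(words per line) - 2, so the line size in bytes is 1 << (field + 4). A worked example:

    /*
     * CPUV7_CT_xSIZE_LEN(csize) == 1  ->  1 << (1 + 4) = 32-byte lines, mask 0x1f
     * CPUV7_CT_xSIZE_LEN(csize) == 2  ->  1 << (2 + 4) = 64-byte lines, mask 0x3f
     */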
@ -913,12 +913,12 @@ set_cpufuncs()
cpufuncs = cortexa_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
get_cachetype_cp15();
pmap_pte_init_mmu_v6();
goto out;
}
#endif /* CPU_CORTEXA */
#if defined(CPU_MV_PJ4B)
if (cputype == CPU_ID_MV88SV581X_V7 ||
cputype == CPU_ID_MV88SV584X_V7 ||
@ -1165,7 +1165,7 @@ arm11x6_setup(void)
(3 << 30) | /* SBZ */
(1 << 29) | /* FA */
(1 << 28) | /* TR */
(3 << 26) | /* SBZ */
(3 << 26) | /* SBZ */
(3 << 19) | /* SBZ */
(1 << 17); /* SBZ */
@ -1258,39 +1258,39 @@ void
cortexa_setup(void)
{
int cpuctrl, cpuctrlmask;
cpuctrlmask = CPU_CONTROL_MMU_ENABLE | /* MMU enable [0] */
CPU_CONTROL_AFLT_ENABLE | /* Alignment fault [1] */
CPU_CONTROL_DC_ENABLE | /* DCache enable [2] */
CPU_CONTROL_BPRD_ENABLE | /* Branch prediction [11] */
CPU_CONTROL_IC_ENABLE | /* ICache enable [12] */
CPU_CONTROL_VECRELOC; /* Vector relocation [13] */
cpuctrl = CPU_CONTROL_MMU_ENABLE |
CPU_CONTROL_IC_ENABLE |
CPU_CONTROL_DC_ENABLE |
CPU_CONTROL_BPRD_ENABLE;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
/* Switch to big endian */
#ifdef __ARMEB__
cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
/* Check if the vector page is at the high address (0xffff0000) */
if (vector_page == ARM_VECTORS_HIGH)
cpuctrl |= CPU_CONTROL_VECRELOC;
/* Clear out the cache */
cpu_idcache_wbinv_all();
/* Set the control register */
ctrl = cpuctrl;
cpu_control(cpuctrlmask, cpuctrl);
/* And again. */
cpu_idcache_wbinv_all();
#ifdef SMP

View File

@ -73,7 +73,7 @@ __FBSDID("$FreeBSD$");
*
* Erratum 411920 in ARM1136 (fixed in r1p4)
* Erratum 415045 in ARM1176 (fixed in r0p5?)
*
*
* - value of arg 'reg' Should Be Zero
*/
#define Invalidate_I_cache(Rtmp1, Rtmp2) \
@ -150,20 +150,20 @@ ENTRY_NP(arm11x6_icache_sync_range)
/* Erratum ARM1176 371367 */
mrs r2, cpsr /* save the CPSR */
cpsid ifa /* disable interrupts (irq,fiq,abort) */
mov r3, #0
mov r3, #0
mcr p15, 0, r3, c13, c0, 0 /* write FCSE (uTLB invalidate) */
mcr p15, 0, r3, c7, c5, 4 /* flush prefetch buffer */
add r3, pc, #0x24
add r3, pc, #0x24
mcr p15, 0, r3, c7, c13, 1 /* prefetch I-cache line */
mcrr p15, 0, r1, r0, c5 /* invalidate I-cache range */
msr cpsr_cx, r2 /* local_irq_restore */
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
mcrr p15, 0, r1, r0, c12 /* clean and invalidate D cache range */ /* XXXNH */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
@ -177,20 +177,20 @@ ENTRY_NP(arm11x6_idcache_wbinv_range)
/* Erratum ARM1176 371367 */
mrs r2, cpsr /* save the CPSR */
cpsid ifa /* disable interrupts (irq,fiq,abort) */
mov r3, #0
mov r3, #0
mcr p15, 0, r3, c13, c0, 0 /* write FCSE (uTLB invalidate) */
mcr p15, 0, r3, c7, c5, 4 /* flush prefetch buffer */
add r3, pc, #0x24
add r3, pc, #0x24
mcr p15, 0, r3, c7, c13, 1 /* prefetch I-cache line */
mcrr p15, 0, r1, r0, c5 /* invalidate I-cache range */
msr cpsr_cx, r2 /* local_irq_restore */
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
mcrr p15, 0, r1, r0, c14 /* clean and invalidate D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
@ -199,7 +199,7 @@ END(arm11x6_idcache_wbinv_range)
/*
* Preload the cache before issuing the WFI by conditionally disabling the
* mcr instructions the first time around the loop. Ensure the function is
* mcr instructions the first time around the loop. Ensure the function is
* cacheline aligned.
*/
.arch armv6

View File

@ -130,7 +130,7 @@ ENTRY(arm9_dcache_wb_range)
bhi .Larm9_wb_next
mov pc, lr
END(arm9_dcache_wb_range)
ENTRY(arm9_dcache_wbinv_range)
ldr ip, .Larm9_line_size
cmp r1, #0x4000
@ -147,7 +147,7 @@ ENTRY(arm9_dcache_wbinv_range)
bhi .Larm9_wbinv_next
mov pc, lr
END(arm9_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
* must use wb-inv of the entire cache.

View File

@ -37,7 +37,7 @@
/*
* $FreeBSD$
*/
#include <machine/asm.h>
.arch armv6
@ -69,7 +69,7 @@ ENTRY(armv6_dcache_wb_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(armv6_dcache_wb_range)
/* LINTSTUB: void armv6_dcache_wbinv_range(vaddr_t, vsize_t); */
ENTRY(armv6_dcache_wbinv_range)
add r1, r1, r0
@ -78,7 +78,7 @@ ENTRY(armv6_dcache_wbinv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
END(armv6_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
* must use wb-inv of the entire cache.

View File

@ -64,7 +64,7 @@ __FBSDID("$FreeBSD$");
#define PT_OUTER_WT (2 << 3)
#define PT_OUTER_WB (3 << 3)
#define PT_OUTER_WBWA (1 << 3)
#ifdef SMP
#define PT_ATTR (PT_S|PT_INNER_WBWA|PT_OUTER_WBWA|PT_NOS)
#else

View File

@ -135,7 +135,7 @@ __FBSDID("$FreeBSD$");
ldr r4, .Lblock_userspace_access ; \
ldr ip, [r4] ; \
orr r0, ip, #1 ; \
str r0, [r4]
str r0, [r4]
#define XSCALE_CACHE_CLEAN_UNBLOCK \
str ip, [r3] ; \
@ -217,7 +217,7 @@ ENTRY(xscalec3_cache_syncI_rng)
CPWAIT_AND_RETURN(r0)
END(xscalec3_cache_syncI_rng)
ENTRY(xscalec3_cache_purgeD_rng)
cmp r1, #0x4000

View File

@ -48,7 +48,7 @@ void
cpuinfo_init(void)
{
cpuinfo.midr = cp15_midr_get();
cpuinfo.midr = cp15_midr_get();
/* Test old version id schemes first */
if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
if (CPU_ID_ISOLD(cpuinfo.midr)) {
@ -74,7 +74,7 @@ cpuinfo_init(void)
/* non ARM -> must be new id scheme */
cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
}
}
/* Parse rest of MIDR */
cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
@ -86,11 +86,11 @@ cpuinfo_init(void)
cpuinfo.tlbtr = cp15_tlbtr_get();
cpuinfo.mpidr = cp15_mpidr_get();
cpuinfo.revidr = cp15_revidr_get();
/* if CPU is not v7 cpu id scheme */
if (cpuinfo.architecture != 0xF)
return;
cpuinfo.id_pfr0 = cp15_id_pfr0_get();
cpuinfo.id_pfr1 = cp15_id_pfr1_get();
cpuinfo.id_dfr0 = cp15_id_dfr0_get();

View File

@ -326,12 +326,9 @@ branch_taken(u_int insn, db_addr_t pc)
default:
break; /* XXX */
}
}
return (addr + offset);
}
case 0xa: /* b ... */
case 0xb: /* bl ... */
addr = ((insn << 2) & 0x03ffffff);
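An ARM B/BL target is the 24-bit immediate sign-extended, shifted left two bits, and added to pc + 8; the masked shift above produces that byte offset before sign extension. A worked decode with an illustrative encoding:

    /*
     * insn = 0xeafffffe ("b ." - branch to self), pc = 0xc0100000:
     *   (insn << 2) & 0x03ffffff = 0x03fffff8
     *   bit 25 set -> sign-extends to -8
     *   target = pc + 8 + (-8) = 0xc0100000 (the instruction itself)
     */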

View File

@ -93,7 +93,7 @@ extern void sheeva_l2cache_wbinv_all(void);
#define cpu_idcache_wbinv_all armv7_idcache_wbinv_all
#define cpu_l2cache_wbinv_all()
#else
#define cpu_l2cache_wbinv_all()
#define cpu_l2cache_wbinv_all()
#endif
static void armadaxp_idcache_wbinv_all(void);
@ -216,7 +216,7 @@ _startC(void)
"mov sp, %1\n"
"mov pc, %0\n"
: : "r" (target_addr), "r" (tmp_sp));
}
#endif
#ifdef KZIP
@ -225,7 +225,7 @@ _startC(void)
sp += 2 * L1_TABLE_SIZE;
#endif
sp += 1024 * 1024; /* Should be enough for a stack */
__asm __volatile("adr %0, 2f\n"
"bic %0, %0, #0xff000000\n"
"and %1, %1, #0xff000000\n"
@ -366,7 +366,7 @@ get_cachetype_cp15()
static void
arm9_setup(void)
{
get_cachetype_cp15();
arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
@ -490,7 +490,7 @@ load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
vm_offset_t lastaddr = 0;
Elf_Addr ssym = 0;
Elf_Dyn *dp;
eh = (Elf32_Ehdr *)kstart;
ssym = 0;
entry_point = (void*)eh->e_entry;
@ -504,7 +504,7 @@ load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
lastaddr = phdr[i].p_vaddr - KERNVIRTADDR +
curaddr + phdr[i].p_memsz;
}
/* Save the symbol tables, as they're about to be scratched. */
memcpy(shdr, (void *)(kstart + eh->e_shoff),
sizeof(*shdr) * eh->e_shnum);
@ -552,12 +552,12 @@ load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
lastaddr = roundup(lastaddr,
sizeof(shdr[symstrindex].sh_size));
}
}
}
if (!d)
return ((void *)lastaddr);
j = eh->e_phnum;
for (i = 0; i < j; i++) {
volatile char c;
@ -610,7 +610,7 @@ load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
((void(*)(void))(entry_point - KERNVIRTADDR + curaddr))();
__asm __volatile(".globl func_end\n"
"func_end:");
/* NOTREACHED */
return NULL;
}
@ -653,7 +653,7 @@ setup_pagetables(unsigned int pt_addr, vm_paddr_t physstart, vm_paddr_t physend,
"mov r0, r0\n"
"sub pc, pc, #4\n" :
"=r" (tmp) : "r" (pd), "r" (domain));
/*
* XXX: This is the most stupid workaround I've ever written.
* For some reason, the KB9202 won't boot the kernel unless
@ -680,7 +680,7 @@ __start(void)
if (*kernel == 0x1f && kernel[1] == 0x8b) {
pt_addr = (((int)&_end + KERNSIZE + 0x100) &
~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE;
#ifdef CPU_ARM9
/* So that idcache_wbinv works; */
if ((cpufunc_id() & 0x0000f000) == 0x00009000)
@ -715,7 +715,7 @@ __start(void)
dst = (void *)(((vm_offset_t)dst & ~3));
pt_addr = ((unsigned int)dst &~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE;
setup_pagetables(pt_addr, (vm_paddr_t)curaddr,
(vm_paddr_t)curaddr + 0x10000000, 0);
(vm_paddr_t)curaddr + 0x10000000, 0);
sp = pt_addr + L1_TABLE_SIZE + 8192;
sp = sp &~3;
dst = (void *)(sp + 4);

View File

@ -63,7 +63,7 @@ _C_LABEL(dtrace_invop_calltrap_addr):
.word 0
#endif
.text
.text
.align 2
/*
@ -123,7 +123,7 @@ _C_LABEL(dtrace_invop_calltrap_addr):
add sp, sp, #(4*17); /* Adjust the stack pointer */ \
ldr lr, [sp], #4; /* Pull the return address */ \
add sp, sp, #4 /* Align the stack */
#else
#else
#define PULLFRAME \
ldr r0, [sp], #4 ; /* Get the SPSR from stack */ \
msr spsr_fsxc, r0; \
@ -226,7 +226,7 @@ _C_LABEL(dtrace_invop_calltrap_addr):
mov r0, r0; /* NOP for previous instruction */ \
add sp, sp, #(4*15); /* Adjust the stack pointer */ \
ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */
#else
#else
#define PULLFRAMEFROMSVCANDEXIT \
ldr r0, [sp], #4; /* Get the SPSR from stack */ \
msr spsr_fsxc, r0; /* restore SPSR */ \
@ -320,7 +320,7 @@ END(exception_exit)
* Entry point for a Prefetch Abort exception.
*
* The hardware switches to the abort mode stack; we switch to svc32 before
* calling the handler, then return directly to the original mode/stack
* calling the handler, then return directly to the original mode/stack
* on exit (without transitioning back through the abort mode stack).
*/
ASENTRY_NP(prefetch_abort_entry)
@ -340,7 +340,7 @@ END(prefetch_abort_entry)
* Entry point for a Data Abort exception.
*
* The hardware switches to the abort mode stack; we switch to svc32 before
* calling the handler, then return directly to the original mode/stack
* calling the handler, then return directly to the original mode/stack
* on exit (without transitioning back through the abort mode stack).
*/
ASENTRY_NP(data_abort_entry)
@ -360,7 +360,7 @@ END(data_abort_entry)
* Entry point for an Undefined Instruction exception.
*
* The hardware switches to the undefined mode stack; we switch to svc32 before
* calling the handler, then return directly to the original mode/stack
* calling the handler, then return directly to the original mode/stack
* on exit (without transitioning back through the undefined mode stack).
*/
ASENTRY_NP(undefined_entry)
@ -374,7 +374,7 @@ END(undefined_entry)
* Entry point for a normal IRQ.
*
* The hardware switches to the IRQ mode stack; we switch to svc32 before
* calling the handler, then return directly to the original mode/stack
* calling the handler, then return directly to the original mode/stack
* on exit (without transitioning back through the IRQ mode stack).
*/
ASENTRY_NP(irq_entry)
@ -383,12 +383,12 @@ ASENTRY_NP(irq_entry)
adr lr, exception_exit /* Return from handler via standard */
mov r0, sp /* exception exit routine. Pass the */
b _C_LABEL(arm_irq_handler)/* trapframe to the handler. */
END(irq_entry)
END(irq_entry)
/*
* Entry point for an FIQ interrupt.
*
* We don't currently support FIQ handlers very much. Something can
* We don't currently support FIQ handlers very much. Something can
* install itself in the FIQ vector using code (that may or may not work
* these days) in fiq.c. If nobody does that and an FIQ happens, this
* default handler just disables FIQs and otherwise ignores it.
@ -416,7 +416,7 @@ Laddr_exception_msg:
END(addr_exception_entry)
/*
* Entry point for the system Reset vector.
* Entry point for the system Reset vector.
* This should never happen, so panic.
*/
ASENTRY_NP(reset_entry)
@ -434,8 +434,8 @@ END(reset_entry)
* the ARM vectors page (high or low) as part of CPU initialization. The
* code that does the copy assumes that page0_data holds one 32-bit word
* of data for each of the predefined ARM vectors. It also assumes that
* page0_data follows the vectors in page0, but other stuff can appear
* between the two. We currently leave room between the two for some fiq
* page0_data follows the vectors in page0, but other stuff can appear
* between the two. We currently leave room between the two for some fiq
* handler code to be copied in.
*/
.global _C_LABEL(page0), _C_LABEL(page0_data)

View File

@ -59,7 +59,7 @@ extern uint32_t fiq_nullhandler_size;
* fiq_installhandler:
*
* Actually install the FIQ handler down at the FIQ vector.
*
*
* The FIQ vector is fixed by the hardware definition as the
* seventh 32-bit word in the vector page.
*

View File

@ -70,11 +70,11 @@ EENTRY_NP(casuword32)
adr r4, .Lcasuwordfault
str r4, [r3, #PCB_ONFAULT]
#if __ARM_ARCH >= 6
1:
1:
cmp r0, #KERNBASE
mvnhs r0, #0
bhs 2f
ldrex r5, [r0]
cmp r5, r1
movne r0, r5
@ -106,7 +106,7 @@ END(casuword)
str r0, [r3, #PCB_ONFAULT]
mvn r0, #0x00000000
ldmfd sp!, {r4, r5}
RET
RET
/*
* fuword(caddr_t uaddr);

View File

@ -197,8 +197,8 @@ gic_decode_fdt(uint32_t iparent, uint32_t *intr, int *interrupt,
static u_int num_intr_cells;
if (num_intr_cells == 0) {
if (OF_searchencprop(OF_node_from_xref(iparent),
"#interrupt-cells", &num_intr_cells,
if (OF_searchencprop(OF_node_from_xref(iparent),
"#interrupt-cells", &num_intr_cells,
sizeof(num_intr_cells)) == -1) {
num_intr_cells = 1;
}

View File

@ -149,7 +149,7 @@ arm_setup_irqhandler(const char *name, driver_filter_t *filt,
if (error)
return;
intr_events[irq] = event;
snprintf(&intrnames[irq * INTRNAME_LEN], INTRNAME_LEN,
snprintf(&intrnames[irq * INTRNAME_LEN], INTRNAME_LEN,
"irq%d: %-*s", irq, INTRNAME_LEN - 1, name);
}
intr_event_add_handler(event, name, filt, hand, arg,
@ -164,7 +164,7 @@ arm_remove_irqhandler(int irq, void *cookie)
event = intr_events[irq];
arm_mask_irq(irq);
error = intr_event_remove_handler(cookie);
if (!TAILQ_EMPTY(&event->ie_handlers))

View File

@ -245,7 +245,7 @@ void
board_set_serial(uint64_t serial)
{
snprintf(board_serial, sizeof(board_serial)-1,
snprintf(board_serial, sizeof(board_serial)-1,
"%016jx", serial);
}
@ -297,7 +297,7 @@ sendsig(catcher, ksi, mask)
/* make room on the stack */
fp--;
/* make the stack aligned */
fp = (struct sigframe *)STACKALIGN(fp);
/* Populate the siginfo frame. */
@ -328,7 +328,7 @@ sendsig(catcher, ksi, mask)
* trampoline version numbers are coordinated with machine-
* dependent code in libc.
*/
tf->tf_r0 = sig;
tf->tf_r1 = (register_t)&fp->sf_si;
tf->tf_r2 = (register_t)&fp->sf_uc;
@ -430,7 +430,7 @@ cpu_startup(void *dummy)
/*
* Display the RAM layout.
*/
printf("real memory = %ju (%ju MB)\n",
printf("real memory = %ju (%ju MB)\n",
(uintmax_t)arm32_ptob(realmem),
(uintmax_t)arm32_ptob(realmem) / mbyte);
printf("avail memory = %ju (%ju MB)\n",
@ -492,7 +492,7 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
void
cpu_idle(int busy)
{
CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
spinlock_enter();
#ifndef NO_EVENTTIMERS
@ -562,14 +562,14 @@ int
set_regs(struct thread *td, struct reg *regs)
{
struct trapframe *tf = td->td_frame;
bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
tf->tf_usr_sp = regs->r_sp;
tf->tf_usr_lr = regs->r_lr;
tf->tf_pc = regs->r_pc;
tf->tf_spsr &= ~PSR_FLAGS;
tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
return (0);
return (0);
}
int
@ -633,7 +633,7 @@ ptrace_single_step(struct thread *td)
{
struct proc *p;
int error;
/* TODO: This needs to be updated for Thumb-2 */
if ((td->td_frame->tf_spsr & PSR_T) != 0)
return (EINVAL);
@ -812,7 +812,7 @@ sys_sigreturn(td, uap)
{
ucontext_t uc;
int spsr;
if (uap == NULL)
return (EFAULT);
if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
@ -1186,7 +1186,7 @@ initarm(struct arm_boot_params *abp)
/* Grab reserved memory regions information from device tree. */
if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
EXFLAG_NODUMP | EXFLAG_NOALLOC);
/* Platform-specific initialisation */
@ -1392,7 +1392,7 @@ initarm(struct arm_boot_params *abp)
*
* Prepare the list of physical memory available to the vm subsystem.
*/
arm_physmem_exclude_region(abp->abp_physaddr,
arm_physmem_exclude_region(abp->abp_physaddr,
(virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
arm_physmem_init_kernel_globals();

View File

@ -222,7 +222,7 @@ init_secondary(int cpu)
end = IPI_IRQ_START;
#endif
#endif
for (int i = start; i <= end; i++)
arm_unmask_irq(i);
enable_interrupts(PSR_I);
@ -344,7 +344,7 @@ release_aps(void *dummy __unused)
/*
* IPI handler
*/
/*
/*
* Use 0xdeadbeef as the argument value for irq 0,
* if we used 0, the intr code will give the trap frame
* pointer instead.

View File

@ -414,7 +414,7 @@ arm_tmr_attach(device_t dev)
if (!arm_tmr_freq_varies)
tc_err = attach_tc(sc);
else if (bootverbose)
device_printf(sc->dev,
device_printf(sc->dev,
"not using variable-frequency device as timecounter");
sc->memrid++;
sc->irqrid++;
@ -488,7 +488,7 @@ arm_tmr_change_frequency(uint64_t newfreq)
* @usec: number of microseconds to delay by
*
* This function is called all over the kernel and is supposed to provide a
* consistent delay. This function may also be called before the console
* consistent delay. This function may also be called before the console
* is setup so no printf's can be called here.
*
* RETURNS:

View File

@ -127,7 +127,7 @@ static driver_t nexus_driver = {
nexus_methods,
1 /* no softc */
};
EARLY_DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0,
EARLY_DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0,
BUS_PASS_BUS + BUS_PASS_ORDER_EARLY);
static int
@ -236,7 +236,7 @@ nexus_release_resource(device_t bus, device_t child, int type, int rid,
struct resource *res)
{
int error;
if (rman_get_flags(res) & RF_ACTIVE) {
error = bus_deactivate_resource(child, type, rid, res);
if (error)

View File

@ -62,7 +62,7 @@ static size_t hwcnt;
static size_t excnt;
/*
* These "avail lists" are globals used to communicate physical memory layout to
* These "avail lists" are globals used to communicate physical memory layout to
* other parts of the kernel. Within the arrays, each value is the starting
* address of a contiguous area of physical address space. The values at even
* indexes are areas that contain usable memory and the values at odd indexes
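A consumer of these arrays walks them two entries at a time. A hedged sketch, assuming the conventional layout of start/end pairs terminated by a zero entry (the array name phys_avail[] and the loop are illustrative, not taken from this diff):

    vm_paddr_t start, end;
    int i;

    for (i = 0; phys_avail[i + 1] != 0; i += 2) {
        start = phys_avail[i];      /* even index: start of a usable region */
        end   = phys_avail[i + 1];  /* odd index: end of that region        */
        printf("usable RAM: %#jx - %#jx\n", (uintmax_t)start, (uintmax_t)end);
    }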

View File

@ -121,7 +121,7 @@ pl190_intc_attach(device_t dev)
id = 0;
for (i = 3; i >= 0; i--) {
id = (id << 8) |
id = (id << 8) |
(intc_vic_read_4(VICPERIPHID + i*4) & 0xff);
}
@ -129,7 +129,7 @@ pl190_intc_attach(device_t dev)
id = 0;
for (i = 3; i >= 0; i--) {
id = (id << 8) |
id = (id << 8) |
(intc_vic_read_4(VICPRIMECELLID + i*4) & 0xff);
}
@ -152,7 +152,7 @@ static driver_t pl190_intc_driver = {
static devclass_t pl190_intc_devclass;
EARLY_DRIVER_MODULE(intc, simplebus, pl190_intc_driver, pl190_intc_devclass,
EARLY_DRIVER_MODULE(intc, simplebus, pl190_intc_driver, pl190_intc_devclass,
0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
int
@ -164,7 +164,7 @@ arm_get_next_irq(int last_irq)
/* Sanity check */
if (irq < 0)
irq = 0;
pending = intc_vic_read_4(VICIRQSTATUS);
while (irq < VIC_NIRQS) {
if (pending & (1 << irq))

View File

@ -50,11 +50,11 @@ __FBSDID("$FreeBSD$");
/*
* Define this if you need to disable PL310 for debugging purpose
* Spec:
* Spec:
* http://infocenter.arm.com/help/topic/com.arm.doc.ddi0246e/DDI0246E_l2c310_r3p1_trm.pdf
*/
/*
/*
* Hardcode errata for now
* http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246b/pr01s02s02.html
*/
@ -137,7 +137,7 @@ pl310_set_ram_latency(struct pl310_softc *sc, uint32_t which_reg,
{
uint32_t v;
KASSERT(which_reg == PL310_TAG_RAM_CTRL ||
KASSERT(which_reg == PL310_TAG_RAM_CTRL ||
which_reg == PL310_DATA_RAM_CTRL,
("bad pl310 ram latency register address"));
@ -191,7 +191,7 @@ pl310_wait_background_op(uint32_t off, uint32_t mask)
/**
* pl310_cache_sync - performs a cache sync operation
*
*
* According to the TRM:
*
* "Before writing to any other register you must perform an explicit
@ -231,7 +231,7 @@ pl310_wbinv_all(void)
for (i = 0; i < g_ways_assoc; i++) {
for (j = 0; j < g_way_size / g_l2cache_line_size; j++) {
pl310_write4(pl310_softc,
pl310_write4(pl310_softc,
PL310_CLEAN_INV_LINE_IDX,
(i << 28 | j << 5));
}
@ -278,8 +278,8 @@ pl310_wbinv_range(vm_paddr_t start, vm_size_t size)
while (size > 0) {
#ifdef PL310_ERRATA_588369
if (pl310_softc->sc_rtl_revision <= CACHE_ID_RELEASE_r1p0) {
/*
* Errata 588369 says that clean + inv may keep the
/*
* Errata 588369 says that clean + inv may keep the
* cache line if it was clean, the recommended
* workaround is to clean then invalidate the cache
* line, with write-back and cache linefill disabled.
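A rough sketch of that workaround for a single line, addressed by physical address; the register names follow the pl310 driver's naming style but are assumptions here, not taken from this diff:

    /* Assumed register names (PL310_DEBUG_CTRL, PL310_CLEAN_LINE_PA,
     * PL310_INV_LINE_PA); consult the driver header for the real ones. */
    pl310_write4(pl310_softc, PL310_DEBUG_CTRL, 0x3);       /* disable WB + linefill */
    pl310_write4(pl310_softc, PL310_CLEAN_LINE_PA, start);  /* clean the line        */
    pl310_write4(pl310_softc, PL310_INV_LINE_PA, start);    /* then invalidate it    */
    pl310_write4(pl310_softc, PL310_DEBUG_CTRL, 0x0);       /* restore normal mode   */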
@ -402,10 +402,10 @@ pl310_config_intr(void *arg)
pl310_filter, NULL, sc, &sc->sc_irq_h);
/* Cache Line Eviction for Counter 0 */
pl310_write4(sc, PL310_EVENT_COUNTER0_CONF,
pl310_write4(sc, PL310_EVENT_COUNTER0_CONF,
EVENT_COUNTER_CONF_INCR | EVENT_COUNTER_CONF_CO);
/* Data Read Request for Counter 1 */
pl310_write4(sc, PL310_EVENT_COUNTER1_CONF,
pl310_write4(sc, PL310_EVENT_COUNTER1_CONF,
EVENT_COUNTER_CONF_INCR | EVENT_COUNTER_CONF_DRREQ);
/* Enable and clear pending interrupts */
@ -413,9 +413,9 @@ pl310_config_intr(void *arg)
pl310_write4(sc, PL310_INTR_MASK, INTR_MASK_ALL);
/* Enable counters and reset C0 and C1 */
pl310_write4(sc, PL310_EVENT_COUNTER_CTRL,
EVENT_COUNTER_CTRL_ENABLED |
EVENT_COUNTER_CTRL_C0_RESET |
pl310_write4(sc, PL310_EVENT_COUNTER_CTRL,
EVENT_COUNTER_CTRL_ENABLED |
EVENT_COUNTER_CTRL_C0_RESET |
EVENT_COUNTER_CTRL_C1_RESET);
config_intrhook_disestablish(sc->sc_ich);
@ -426,7 +426,7 @@ pl310_config_intr(void *arg)
static int
pl310_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
@ -444,7 +444,7 @@ pl310_attach(device_t dev)
sc->sc_dev = dev;
rid = 0;
sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->sc_mem_res == NULL)
panic("%s: Cannot map registers", device_get_name(dev));
@ -501,7 +501,7 @@ pl310_attach(device_t dev)
pl310_write4(pl310_softc, PL310_INV_WAY, 0xffff);
pl310_wait_background_op(PL310_INV_WAY, 0xffff);
platform_pl310_write_ctrl(sc, CTRL_ENABLED);
device_printf(dev, "L2 Cache enabled: %uKB/%dB %d ways\n",
device_printf(dev, "L2 Cache enabled: %uKB/%dB %d ways\n",
(g_l2cache_size / 1024), g_l2cache_line_size, g_ways_assoc);
if (bootverbose)
pl310_print_config(sc);

View File

@ -2194,7 +2194,7 @@ pmap_release(pmap_t pmap)
pmap->pm_stats.resident_count));
KASSERT(pt2tab_user_is_empty(pmap->pm_pt2tab),
("%s: has allocated user PT2(s)", __func__));
KASSERT(CPU_EMPTY(&pmap->pm_active),
KASSERT(CPU_EMPTY(&pmap->pm_active),
("%s: pmap %p is active on some CPU(s)", __func__, pmap));
mtx_lock_spin(&allpmaps_lock);
@ -5958,7 +5958,7 @@ pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa)
{
vm_offset_t sva;
KASSERT((size & PAGE_MASK) == 0,
KASSERT((size & PAGE_MASK) == 0,
("%s: device mapping not page-sized", __func__));
sva = va;
@ -5976,7 +5976,7 @@ pmap_kremove_device(vm_offset_t va, vm_size_t size)
{
vm_offset_t sva;
KASSERT((size & PAGE_MASK) == 0,
KASSERT((size & PAGE_MASK) == 0,
("%s: device mapping not page-sized", __func__));
sva = va;

View File

@ -1145,7 +1145,7 @@ vector_page_setprot(int prot)
/*
* Set referenced flag.
* Vectors' page is always desired
* to be allowed to reside in TLB.
* to be allowed to reside in TLB.
*/
*ptep |= L2_S_REF;
@ -2221,7 +2221,7 @@ pmap_remove_pages(pmap_t pmap)
vm_offset_t va;
uint32_t inuse, bitmask;
int allfree, bit, field, idx;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
@ -2455,7 +2455,7 @@ pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa)
{
vm_offset_t sva;
KASSERT((size & PAGE_MASK) == 0,
KASSERT((size & PAGE_MASK) == 0,
("%s: device mapping not page-sized", __func__));
sva = va;
@ -2472,7 +2472,7 @@ pmap_kremove_device(vm_offset_t va, vm_size_t size)
{
vm_offset_t sva;
KASSERT((size & PAGE_MASK) == 0,
KASSERT((size & PAGE_MASK) == 0,
("%s: device mapping not page-sized", __func__));
sva = va;
@ -3093,7 +3093,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if ((pve = pmap_remove_pv(om, pmap, va))) {
is_exec |= PTE_BEEN_EXECD(opte);
is_refd |= PTE_BEEN_REFD(opte);
if (m && ((m->oflags & VPO_UNMANAGED)))
pmap_free_pv_entry(pmap, pve);
}
@ -3303,7 +3303,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
vm_offset_t next_bucket;
vm_paddr_t pa;
vm_page_t m;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
while (sva < eva) {
@ -3572,7 +3572,7 @@ pmap_pv_insert_section(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
pv_entry_t pv;
rw_assert(&pvh_global_lock, RA_WLOCKED);
if (pv_entry_count < pv_entry_high_water &&
if (pv_entry_count < pv_entry_high_water &&
(pv = pmap_get_pv_entry(pmap, TRUE)) != NULL) {
pv->pv_va = va;
pvh = pa_to_pvh(pa);
@ -3661,7 +3661,7 @@ pmap_pv_promote_section(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
* Tries to create a 1MB page mapping. Returns TRUE if successful and
* FALSE otherwise. Fails if (1) page is unmanaged, kernel pmap or vectors
* page, (2) a mapping already exists at the specified virtual address, or
* (3) a pv entry cannot be allocated without reclaiming another pv entry.
* (3) a pv entry cannot be allocated without reclaiming another pv entry.
*/
static boolean_t
pmap_enter_section(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
@ -3692,7 +3692,7 @@ pmap_enter_section(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
" in pmap %p", va, pmap);
return (FALSE);
}
pa = VM_PAGE_TO_PHYS(m);
pa = VM_PAGE_TO_PHYS(m);
/*
* Abort this mapping if its PV entry could not be created.
*/
@ -3767,7 +3767,7 @@ pmap_remove_section(pmap_t pmap, vm_offset_t sva)
TAILQ_EMPTY(&pvh->pv_list))
vm_page_aflag_clear(m, PGA_WRITEABLE);
}
l2b = pmap_get_l2_bucket(pmap, sva);
if (l2b != NULL) {
KASSERT(l2b->l2b_occupancy == L2_PTE_NUM_TOTAL,
@ -3879,8 +3879,8 @@ pmap_promote_section(pmap_t pmap, vm_offset_t va)
if (!L2_S_EXECUTABLE(firstpte))
prot &= ~VM_PROT_EXECUTE;
/*
* Examine each of the other PTEs in the specified l2_bucket.
/*
* Examine each of the other PTEs in the specified l2_bucket.
* Abort if this PTE maps an unexpected 4KB physical page or
* does not have identical characteristics to the first PTE.
*/
@ -4000,7 +4000,7 @@ pmap_demote_section(pmap_t pmap, vm_offset_t va)
/*
* According to assumptions described in pmap_promote_section,
* kernel is and always should be mapped using 1MB section mappings.
* What's more, managed kernel pages were not to be promoted.
* What's more, managed kernel pages were not to be promoted.
*/
KASSERT(pmap != pmap_kernel() && L1_IDX(va) != L1_IDX(vector_page),
("pmap_demote_section: forbidden section mapping"));
@ -4010,7 +4010,7 @@ pmap_demote_section(pmap_t pmap, vm_offset_t va)
l1pd = *pl1pd;
KASSERT((l1pd & L1_TYPE_MASK) == L1_S_PROTO,
("pmap_demote_section: not section or invalid section"));
pa = l1pd & L1_S_FRAME;
m = PHYS_TO_VM_PAGE(pa);
KASSERT((m != NULL && (m->oflags & VPO_UNMANAGED) == 0),
@ -4063,7 +4063,7 @@ pmap_demote_section(pmap_t pmap, vm_offset_t va)
/*
* If the mapping has changed attributes, update the page table
* entries.
*/
*/
if ((*firstptep & L2_S_PROMOTE) != (L1_S_DEMOTE(l1pd)))
pmap_fill_l2b(l2b, newpte);
}
@ -4493,7 +4493,7 @@ pmap_zero_page_gen(vm_page_t m, int off, int size)
{
struct czpages *czp;
KASSERT(TAILQ_EMPTY(&m->md.pv_list),
KASSERT(TAILQ_EMPTY(&m->md.pv_list),
("pmap_zero_page_gen: page has mappings"));
vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
@ -4501,7 +4501,7 @@ pmap_zero_page_gen(vm_page_t m, int off, int size)
sched_pin();
czp = &cpu_czpages[PCPU_GET(cpuid)];
mtx_lock(&czp->lock);
/*
* Hook in the page, zero it.
*/
@ -4589,7 +4589,7 @@ pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
sched_pin();
czp = &cpu_czpages[PCPU_GET(cpuid)];
mtx_lock(&czp->lock);
/*
* Map the pages into the page hook points, copy them, and purge the
* cache for the appropriate page.
@ -5338,7 +5338,7 @@ pmap_dmap_iscurrent(pmap_t pmap)
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
/*
/*
* Remember the memattr in a field that gets used to set the appropriate
* bits in the PTEs as mappings are established.
*/

View File

@ -1005,7 +1005,7 @@ pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
ptep = &l2b->l2b_kva[l2pte_index(va)];
pte = *ptep;
if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
/*
* Page tables must have the cache-mode set to
@ -1357,7 +1357,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
if (!(oflags & maskbits)) {
if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) {
if (pg->md.pv_memattr !=
if (pg->md.pv_memattr !=
VM_MEMATTR_UNCACHEABLE) {
PMAP_LOCK(pm);
l2b = pmap_get_l2_bucket(pm, va);
@ -1955,7 +1955,7 @@ pmap_postinit(void)
pt_entry_t *ptep, pte;
vm_offset_t va, eva;
u_int loop, needed;
needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
needed -= 1;
l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);
@ -1970,7 +1970,7 @@ pmap_postinit(void)
eva = va + L1_TABLE_SIZE;
pl1pt = (pd_entry_t *)va;
while (va < eva) {
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
ptep = &l2b->l2b_kva[l2pte_index(va)];
@ -1979,7 +1979,7 @@ pmap_postinit(void)
*ptep = pte;
PTE_SYNC(ptep);
cpu_tlb_flushD_SE(va);
va += PAGE_SIZE;
}
pmap_init_l1(l1, pl1pt);
@ -2162,11 +2162,11 @@ pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n",
firstaddr, vm_max_kernel_address));
virtual_avail = firstaddr;
kernel_pmap->pm_l1 = l1;
kernel_l1pa = l1pt->pv_pa;
/*
* Scan the L1 translation table created by initarm() and create
* the required metadata for all valid mappings found in it.
@ -2187,7 +2187,7 @@ pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
*/
pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
ptep = (pt_entry_t *)kernel_pt_lookup(pa);
if (ptep == NULL) {
panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
(u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa);
@ -2241,7 +2241,7 @@ pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
}
}
/*
* Ensure the primary (kernel) L1 has the correct cache mode for
* a page table. Bitch if it is not correctly set.
@ -2267,7 +2267,7 @@ pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
* Initialize the global pv list lock.
*/
rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE);
/*
* Reserve some special page table entries/VA space for temporary
* mapping of pages.
@ -2281,7 +2281,7 @@ pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
pmap_alloc_specials(&virtual_avail,
round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
&pmap_kernel_l2ptp_kva, NULL);
size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
pmap_alloc_specials(&virtual_avail,
round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
@ -2319,7 +2319,7 @@ void
pmap_release(pmap_t pmap)
{
struct pcb *pcb;
pmap_idcache_wbinv_all(pmap);
cpu_l2cache_wbinv_all();
pmap_tlb_flushID(pmap);
@ -2352,7 +2352,7 @@ pmap_release(pmap_t pmap)
}
pmap_free_l1(pmap);
dprintf("pmap_release()\n");
}
@ -2368,7 +2368,7 @@ pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
pt_entry_t *ptep;
vm_paddr_t pa;
struct vm_page *pg;
pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
if (pg == NULL)
return (1);
@ -2530,7 +2530,7 @@ pmap_remove_pages(pmap_t pmap)
struct l2_bucket *l2b = NULL;
vm_page_t m;
pt_entry_t *pt;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
cpu_idcache_wbinv_all();
@ -2576,7 +2576,7 @@ pmap_kenter_supersection(vm_offset_t va, uint64_t pa, int flags)
pd_entry_t pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) |
(((pa >> 32) & 0xf) << 20) | L1_S_PROT(PTE_KERNEL,
VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
struct l1_ttable *l1;
struct l1_ttable *l1;
vm_offset_t va0, va_end;
KASSERT(((va | pa) & L1_SUP_OFFSET) == 0,
@ -2685,7 +2685,7 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
rw_wlock(&pvh_global_lock);
if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva != 0) {
if ((pve = pmap_get_pv_entry()) == NULL)
panic("pmap_kenter_internal: no pv entries");
panic("pmap_kenter_internal: no pv entries");
PMAP_LOCK(pmap_kernel());
pmap_enter_pv(m, pve, pmap_kernel(), va,
PVF_WRITE | PVF_UNMAN);
@ -2716,7 +2716,7 @@ pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa)
{
vm_offset_t sva;
KASSERT((size & PAGE_MASK) == 0,
KASSERT((size & PAGE_MASK) == 0,
("%s: device mapping not page-sized", __func__));
sva = va;
@ -2733,7 +2733,7 @@ pmap_kremove_device(vm_offset_t va, vm_size_t size)
{
vm_offset_t sva;
KASSERT((size & PAGE_MASK) == 0,
KASSERT((size & PAGE_MASK) == 0,
("%s: device mapping not page-sized", __func__));
sva = va;
@ -2775,7 +2775,7 @@ pmap_kremove(vm_offset_t va)
struct pv_entry *pve;
vm_page_t m;
vm_offset_t pa;
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
if (!l2b)
return;
@ -3287,9 +3287,9 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* so no need to re-do referenced emulation here.
*/
npte |= L2_S_PROTO;
nflags |= PVF_REF;
if (m && ((prot & VM_PROT_WRITE) != 0 ||
(m->md.pvh_attrs & PVF_MOD))) {
/*
@ -3310,7 +3310,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
npte |= L2_TYPE_INV;
}
if (prot & VM_PROT_WRITE) {
npte |= L2_S_PROT_W;
if (m != NULL &&
@ -3326,7 +3326,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
oflags = pmap_modify_pv(m, pmap, va,
PVF_WRITE | PVF_EXEC | PVF_WIRED |
PVF_MOD | PVF_REF, nflags);
/*
* We may need to flush the cache if we're
* doing rw-ro...
@ -3544,7 +3544,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
pv_entry_t pv;
vm_offset_t next_bucket;
vm_page_t m;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
while (sva < eva) {
@ -3690,7 +3690,7 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
m = PHYS_TO_VM_PAGE(pa);
vm_page_hold(m);
}
} else {
/*
* Note that we can't rely on the validity of the L1
@ -3717,13 +3717,13 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
case L2_TYPE_L:
pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
break;
default:
pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
break;
}
if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
goto retry;
goto retry;
m = PHYS_TO_VM_PAGE(pa);
vm_page_hold(m);
}
@ -3743,12 +3743,12 @@ int
pmap_pinit(pmap_t pmap)
{
PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap));
pmap_alloc_l1(pmap);
bzero(pmap->pm_l2, sizeof(pmap->pm_l2));
CPU_ZERO(&pmap->pm_active);
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
pmap->pm_stats.resident_count = 1;
@ -3783,7 +3783,7 @@ static pv_entry_t
pmap_get_pv_entry(void)
{
pv_entry_t ret_value;
pv_entry_count++;
if (pv_entry_count > pv_entry_high_water)
pagedaemon_wakeup();
@ -4353,7 +4353,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
pv_entry_t pv;
int loops = 0;
boolean_t rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_page_exists_quick: page %p is not managed", m));
rv = FALSE;
@ -4425,7 +4425,7 @@ pmap_is_modified(vm_page_t m)
("pmap_is_modified: page %p is not managed", m));
if (m->md.pvh_attrs & PVF_MOD)
return (TRUE);
return(FALSE);
}
@ -4631,7 +4631,7 @@ pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
}
/*
@ -4793,7 +4793,7 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
/*
/*
* Remember the memattr in a field that gets used to set the appropriate
* bits in the PTEs as mappings are established.
*/

View File

@ -430,7 +430,7 @@ EENTRY(memmove)
/* blat 32 bytes at a time */
/* XXX for really big copies perhaps we should use more registers */
.Lmemmove_floop32:
.Lmemmove_floop32:
ldmia r1!, {r3, r4, r12, lr}
stmia r0!, {r3, r4, r12, lr}
ldmia r1!, {r3, r4, r12, lr}
@ -988,7 +988,7 @@ ENTRY(memcpy)
/* blat 32 bytes at a time */
/* XXX for really big copies perhaps we should use more registers */
.Lmemcpy_loop32:
.Lmemcpy_loop32:
ldmia r1!, {r3, r4, r12, lr}
stmia r0!, {r3, r4, r12, lr}
ldmia r1!, {r3, r4, r12, lr}

View File

@ -143,7 +143,7 @@ ENTRY(cpu_throw)
GET_PCPU(r7, r9)
ldr r7, [r5, #(TD_PCB)] /* r7 = new thread's PCB */
/* Switch to lwp0 context */
ldr r9, .Lcpufuncs
@ -277,14 +277,14 @@ ENTRY(cpu_switch)
ldr r9, [r1, #(TD_MD + MD_RAS_END)]
str r9, [r3, #8]
#else
/*
* Set new tp. No need to store the old one first, userland can't
/*
* Set new tp. No need to store the old one first, userland can't
* change it directly on armv6.
*/
ldr r9, [r1, #(TD_MD + MD_TP)]
mcr p15, 0, r9, c13, c0, 3
#endif
/* Get the user structure for the new process in r9 */
ldr r9, [r1, #(TD_PCB)]
@ -407,7 +407,7 @@ ENTRY(cpu_switch)
cmp r4, r6
beq 1b
#endif
/* XXXSCW: Safe to re-enable FIQs here */
/* rem: r9 = new PCB */
@ -794,7 +794,7 @@ END(cpu_switch)
ENTRY(savectx)
stmfd sp!, {lr}
sub sp, sp, #4
/* Store all the registers in the thread's pcb */
add r3, r0, #(PCB_R4)
stmia r3, {r4-r12, sp, lr, pc}

View File

@ -119,7 +119,7 @@ struct ksig {
u_long code;
};
struct data_abort {
int (*func)(struct trapframe *, u_int, u_int, struct thread *,
int (*func)(struct trapframe *, u_int, u_int, struct thread *,
struct ksig *);
const char *desc;
};
@ -763,4 +763,4 @@ badaddr_read(void *addr, size_t size, void *rptr)
/* Return EFAULT if the address was invalid, else zero */
return (rv);
}
}

View File

@ -107,7 +107,7 @@ vfp_init(void)
coproc = get_coprocessorACR();
coproc |= COPROC10 | COPROC11;
set_coprocessorACR(coproc);
fpsid = fmrx(fpsid); /* read the vfp system id */
fpexc = fmrx(fpexc); /* read the vfp exception reg */

View File

@ -108,10 +108,10 @@ cpu_fork(register struct thread *td1, register struct proc *p2,
#endif
#endif
td2->td_pcb = pcb2;
/* Clone td1's pcb */
bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
/* Point to mdproc and then copy over td1's contents */
mdp2 = &p2->p_md;
bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
@ -133,7 +133,7 @@ cpu_fork(register struct thread *td1, register struct proc *p2,
pcb2->pcb_vfpcpu = -1;
pcb2->pcb_vfpstate.fpscr = VFPSCR_DN | VFPSCR_FZ;
tf = td2->td_frame;
tf->tf_spsr &= ~PSR_C;
tf->tf_r0 = 0;
@ -149,7 +149,7 @@ cpu_fork(register struct thread *td1, register struct proc *p2,
td2->td_md.md_tp = td1->td_md.md_tp;
#endif
}
void
cpu_thread_swapin(struct thread *td)
{
@ -336,7 +336,7 @@ cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
void
swi_vm(void *dummy)
{
if (busdma_swi_pending)
busdma_swi();
}