Remove more unused code and declarations, and add dire warnings to the 64-bit atomic ops used by 32-bit kernels.
Juli Mallett 2012-03-12 08:13:04 +00:00
parent b6f97155cc
commit c8b31c8f20
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=232855
9 changed files with 9 additions and 241 deletions

View File

@ -217,7 +217,6 @@ do { \
struct mips_cpuinfo;
void mips_config_cache(struct mips_cpuinfo *);
void mips_dcache_compute_align(void);
#include <machine/cache_mipsNN.h>
#endif /* _MACHINE_CACHE_H_ */

View File

@ -468,6 +468,5 @@ void insl(uint32_t *, uint32_t *,int);
void outsb(uint8_t *, const uint8_t *,int);
void outsw(uint16_t *, const uint16_t *,int);
void outsl(uint32_t *, const uint32_t *,int);
u_int loadandclear(volatile u_int *addr);
#endif /* !_MACHINE_CPUFUNC_H_ */

View File

@ -124,10 +124,4 @@ struct trapframe {
register_t fdummy;
};
/* REVISIT */
struct frame *get_current_fp(void);
#define get_next_fp(fp) (0)
#define get_return_ptr(fp) (0)
void get_stack_trace(u_int32_t depth, u_int32_t *trace);
#endif /* !_MACHINE_FRAME_H_ */

View File

@ -54,12 +54,9 @@ extern vm_offset_t kernel_kseg0_end;
void MipsSaveCurFPState(struct thread *);
void fork_trampoline(void);
void cpu_swapin(struct proc *);
uintptr_t MipsEmulateBranch(struct trapframe *, uintptr_t, int, uintptr_t);
void MipsSwitchFPState(struct thread *, struct trapframe *);
u_long kvtop(void *addr);
int is_cacheable_mem(vm_paddr_t addr);
void mips_generic_reset(void);
void mips_wait(void);
#define MIPS_DEBUG 0

View File

@ -163,7 +163,6 @@ void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr);
void pmap_kremove(vm_offset_t va);
void *pmap_kenter_temporary(vm_paddr_t pa, int i);
void pmap_kenter_temporary_free(vm_paddr_t pa);
int pmap_compute_pages_to_dump(void);
void pmap_flush_pvcache(vm_page_t m);
int pmap_emulate_modified(pmap_t pmap, vm_offset_t va);
void pmap_grow_direct_page_cache(void);

View File

@ -80,11 +80,6 @@ struct mdproc {
};
#ifdef _KERNEL
struct thread;
void mips_cpu_switch(struct thread *, struct thread *, struct mtx *);
void mips_cpu_throw(struct thread *, struct thread *);
struct syscall_args {
u_int code;
struct sysent *callp;

View File

@ -397,17 +397,6 @@ mips_postboot_fixup(void)
#endif
}
/*
* Many SoCs have a means to reset the core itself. Others do not, or
* the method is unknown to us. For those cases, we jump to the mips
* reset vector and hope for the best. This works well in practice.
*/
void
mips_generic_reset()
{
((void(*)(void))MIPS_RESET_EXC_VEC)();
}
#ifdef SMP
void
mips_pcpu_tlb_init(struct pcpu *pcpu)

View File

@ -940,154 +940,6 @@ LEAF(ffs)
nop
END(ffs)
LEAF(get_current_fp)
j ra
move v0, s8
END(get_current_fp)
LEAF(loadandclear)
.set noreorder
1:
ll v0, 0(a0)
move t0, zero
sc t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(loadandclear)
#if 0
/*
* u_int32_t atomic_cmpset_32(u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
* Atomically compare the value stored at p with cmpval
* and if the two values are equal, update value *p with
* newval. Return zero if compare failed, non-zero otherwise
*
*/
LEAF(atomic_cmpset_32)
.set noreorder
1:
ll t0, 0(a0)
move v0, zero
bne t0, a1, 2f
move t1, a2
sc t1, 0(a0)
beq t1, zero, 1b
or v0, v0, 1
2:
j ra
nop
END(atomic_cmpset_32)
/**
* u_int32_t
* atomic_readandclear_32(u_int32_t *a)
* {
* u_int32_t retval;
* retval = *a;
* *a = 0;
* }
*/
LEAF(atomic_readandclear_32)
.set noreorder
1:
ll t0, 0(a0)
move t1, zero
move v0, t0
sc t1, 0(a0)
beq t1, zero, 1b
nop
j ra
nop
END(atomic_readandclear_32)
/**
* void
* atomic_set_32(u_int32_t *a, u_int32_t b)
* {
* *a |= b;
* }
*/
LEAF(atomic_set_32)
.set noreorder
1:
ll t0, 0(a0)
or t0, t0, a1
sc t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(atomic_set_32)
/**
* void
* atomic_add_32(uint32_t *a, uint32_t b)
* {
* *a += b;
* }
*/
LEAF(atomic_add_32)
.set noreorder
srl a0, a0, 2 # round down address to be 32-bit aligned
sll a0, a0, 2
1:
ll t0, 0(a0)
addu t0, t0, a1
sc t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(atomic_add_32)
/**
* void
* atomic_clear_32(u_int32_t *a, u_int32_t b)
* {
* *a &= ~b;
* }
*/
LEAF(atomic_clear_32)
.set noreorder
srl a0, a0, 2 # round down address to be 32-bit aligned
sll a0, a0, 2
nor a1, zero, a1
1:
ll t0, 0(a0)
and t0, t0, a1 # t1 has the new lower 16 bits
sc t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(atomic_clear_32)
/**
* void
* atomic_subtract_32(uint16_t *a, uint16_t b)
* {
* *a -= b;
* }
*/
LEAF(atomic_subtract_32)
.set noreorder
srl a0, a0, 2 # round down address to be 32-bit aligned
sll a0, a0, 2
1:
ll t0, 0(a0)
subu t0, t0, a1
sc t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(atomic_subtract_32)
#endif
/**
* void
* atomic_set_16(u_int16_t *a, u_int16_t b)
@ -1259,6 +1111,15 @@ END(atomic_subtract_8)
* NOPs in it for all processors. XXX
*
* Maybe it would be better to just leave this undefined in that case.
*
* XXX These routines are not safe in the case of a TLB miss on a1 or
* a0 unless the trapframe is 64-bit, which it just isn't with O32.
* If we take any exception, not just an interrupt, the upper
* 32-bits will be clobbered. Use only N32 and N64 kernels if you
* want to use 64-bit registers while interrupts are enabled or
* with memory operations. Since this isn't even using load-linked
* and store-conditional, perhaps it should just use two registers
* instead, as is right and good with the O32 ABI.
*/
LEAF(atomic_store_64)
mfc0 t1, MIPS_COP_0_STATUS
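
The warning above suggests an alternative for O32: keep the value in 32-bit registers and issue two 32-bit stores with interrupts masked, so no exception can clobber an upper register half. A minimal C sketch of that idea follows; it is illustrative only and not part of this commit, and the intr_disable()/intr_restore() helpers, the headers, and the function name are assumptions.

/*
 * Sketch only: a 64-bit store built from two 32-bit accesses, safe for
 * an O32 kernel because no 64-bit register is ever live across a
 * possible exception.
 */
#include <sys/types.h>
#include <machine/endian.h>	/* _BYTE_ORDER */
#include <machine/cpufunc.h>	/* intr_disable(), intr_restore() */

static __inline void
store_64_as_two_words(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *w = (volatile uint32_t *)p;
	register_t s;

	s = intr_disable();	/* keep an exception from splitting the pair */
#if _BYTE_ORDER == _LITTLE_ENDIAN
	w[0] = (uint32_t)v;		/* low word first */
	w[1] = (uint32_t)(v >> 32);
#else
	w[0] = (uint32_t)(v >> 32);	/* high word first */
	w[1] = (uint32_t)v;
#endif
	intr_restore(s);
}

As with the existing atomic_store_64, this is only atomic against interrupts on the local CPU; it does not use ll/sc and so is not atomic with respect to other processors.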
@ -1455,58 +1316,6 @@ LEAF(casuptr)
j ra
END(casuptr)
#ifdef CPU_CNMIPS
/*
* void octeon_enable_shadow(void)
* turns on access to CC and CCRes
*/
LEAF(octeon_enable_shadow)
li t1, 0x0000000f
mtc0 t1, MIPS_COP_0_INFO
jr ra
nop
END(octeon_enable_shadow)
LEAF(octeon_get_shadow)
mfc0 v0, MIPS_COP_0_INFO
jr ra
nop
END(octeon_get_shadow)
/*
* octeon_set_control(addr, uint32_t val)
*/
LEAF(octeon_set_control)
.set push
or t1, a1, zero
/* dmfc0 a1, 9, 7*/
.word 0x40254807
sd a1, 0(a0)
or a1, t1, zero
/* dmtc0 a1, 9, 7*/
.word 0x40a54807
jr ra
nop
.set pop
END(octeon_set_control)
/*
* octeon_get_control(addr)
*/
LEAF(octeon_get_control)
.set push
.set mips64r2
/* dmfc0 a1, 9, 7 */
.word 0x40254807
sd a1, 0(a0)
jr ra
nop
.set pop
END(octeon_get_control)
#endif
LEAF(mips3_ld)
.set push
.set noreorder

View File

@ -478,19 +478,6 @@ cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
* that are needed.
*/
}
/*
* Convert kernel VA to physical address
*/
u_long
kvtop(void *addr)
{
vm_offset_t va;
va = pmap_kextract((vm_offset_t)addr);
if (va == 0)
panic("kvtop: zero page frame");
return((intptr_t)va);
}
/*
* Implement the pre-zeroed page mechanism.