Fix bzero() for 64-bit.
The existing implementation of bzero() incorrectly clears bytes when the
start address is not word aligned. Fix it by using the REG_SHI macro,
which works on both 32-bit and 64-bit.

Submitted by:	Artem Belevich (fbsdlist at src cx)
parent a8ca1e2aac
commit 33cfbb02c4
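The fix hinges on REG_SHI being a width-aware alias for the MIPS unaligned-store
instructions: whatever the byte order, `REG_SHI zero, 0(a0)` writes zeroes to
every byte from a0 up to (but not including) the next SZREG-byte boundary, so
the same short head-alignment sequence works for 32-bit and 64-bit registers.
The sketch below shows the general shape of that selection; it is only an
illustration of the idea, not a copy of the real header (the authoritative
definitions live in the MIPS machine/asm.h header, and REG_SLO appears here
purely for symmetry).

	/*
	 * Illustrative sketch of REG_SHI/REG_SLO selection.  The actual
	 * definitions in machine/asm.h may differ in detail.  When the
	 * source register is $zero, REG_SHI clears the bytes from the
	 * effective address through the end of the containing SZREG-byte
	 * word, i.e. 1..SZREG-1 bytes when the address is misaligned.
	 */
	#if _BYTE_ORDER == _BIG_ENDIAN
	#if SZREG == 4
	#define	REG_SHI	swl	/* BE: write addr .. end of 4-byte word */
	#define	REG_SLO	swr
	#else	/* SZREG == 8 */
	#define	REG_SHI	sdl	/* BE: write addr .. end of 8-byte word */
	#define	REG_SLO	sdr
	#endif
	#else	/* _BYTE_ORDER == _LITTLE_ENDIAN */
	#if SZREG == 4
	#define	REG_SHI	swr	/* LE: write addr .. end of 4-byte word */
	#define	REG_SLO	swl
	#else	/* SZREG == 8 */
	#define	REG_SHI	sdr	/* LE: write addr .. end of 8-byte word */
	#define	REG_SLO	sdl
	#endif
	#endif

Because REG_SHI expands to the doubleword forms sdl/sdr when SZREG == 8, a
single store can clear the 1 to 7 bytes needed to reach an 8-byte boundary,
which is why the diff below can drop the separate #if SZREG == 8
mask/load/store sequence entirely.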
@@ -58,27 +58,9 @@ LEAF(bzero)
 	PTR_SUBU	a3, zero, a0		# compute # bytes to word align address
 	and		a3, a3, SZREG-1
 	beq		a3, zero, 1f		# skip if word aligned
-#if SZREG == 4
 	PTR_SUBU	a1, a1, a3		# subtract from remaining count
-	SWHI		zero, 0(a0)		# clear 1, 2, or 3 bytes to align
+	REG_SHI		zero, 0(a0)		# clear 1, 2, or 3 bytes to align
 	PTR_ADDU	a0, a0, a3
-#endif
-#if SZREG == 8
-	PTR_SUBU	a1, a1, a3		# subtract from remaining count
-	PTR_ADDU	a0, a0, a3		# align dst to next word
-	sll		a3, a3, 3		# bits to bytes
-	li		a2, -1			# make a mask
-#if _BYTE_ORDER == _BIG_ENDIAN
-	REG_SRLV	a2, a2, a3		# we want to keep the MSB bytes
-#endif
-#if _BYTE_ORDER == _LITTLE_ENDIAN
-	REG_SLLV	a2, a2, a3		# we want to keep the LSB bytes
-#endif
-	nor		a2, zero, a2		# complement the mask
-	REG_L		v0, -SZREG(a0)		# load the word to partially clear
-	and		v0, v0, a2		# clear the bytes
-	REG_S		v0, -SZREG(a0)		# store it back
-#endif
 1:
 	and		v0, a1, SZREG-1		# compute number of words left
 	PTR_SUBU	a3, a1, v0