MFC: r180298; r180299

- Fix spelling and style.
- Use __FBSDID.
- Merge macros that depend on the flags being preserved between calls
  into a single __asm statement, as GCC doesn't guarantee consecutive
  output even for consecutive __asm __volatile statements. Remove the
  otherwise unnecessary __volatile. [1]
- The inline assembler instructions used here alter the condition
  codes, so add them to the clobber list accordingly.
- The inline assembler instructions used here use output operands
  before all input operands are consumed, so add the appropriate
  (earlyclobber) modifiers.

Pointed out by:	bde [1]
commit 906afc5035
parent ca959f9247
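For illustration (a sketch, not part of the commit): the bug class being
fixed is a flag dependency split across two __asm statements. GCC may
schedule flag-clobbering code between them, so the carry set by "addcc"
in the first statement is not guaranteed to survive until the "addc" in
the second. Keeping both instructions in one __asm and declaring the
condition codes ("cc") clobbered expresses the real constraints; the
diff below additionally marks temporaries that are written before all
inputs are consumed as earlyclobber ("=&r"). The function names here
are hypothetical.

	/* Unsafe: relies on the carry surviving between two statements. */
	static __inline unsigned int
	add_eac_unsafe(unsigned int sum, unsigned int v)
	{

		__asm __volatile("addcc %1, %2, %0"
		    : "=r" (sum) : "0" (sum), "r" (v));
		/* GCC may emit flag-altering code here. */
		__asm __volatile("addc %1, 0, %0"
		    : "=r" (sum) : "0" (sum));
		return (sum);
	}

	/* Safe: one statement, condition codes declared clobbered. */
	static __inline unsigned int
	add_eac(unsigned int sum, unsigned int v)
	{

		__asm("addcc %1, %2, %0\n"
		    "addc %0, 0, %0"
		    : "=r" (sum) : "0" (sum), "r" (v) : "cc");
		return (sum);
	}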
@@ -53,10 +53,11 @@
  * from tahoe: in_cksum.c 1.2 86/01/05
  * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
  * from: FreeBSD: src/sys/i386/i386/in_cksum.c,v 1.22 2000/11/25
- *
- * $FreeBSD$
  */
 
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/mbuf.h>
@@ -73,10 +74,10 @@
  * This routine is very heavily used in the network
  * code and should be modified for each CPU to be as fast as possible.
  *
- * This implementation is a sparc64 version. Most code was taken over and
- * adapted from the i386. Some optimizations were changed to achieve (hopefully)
- * better performance.
- * This uses 64 bit loads, but 32 bit additions due to the lack of a 64-bit
+ * This implementation is a sparc64 version. Most code was taken over
+ * and adapted from the i386. Some optimizations were changed to achieve
+ * (hopefully) better performance.
+ * This uses 64-bit loads, but 32-bit additions due to the lack of a 64-bit
  * add-with-carry operation.
  */
 
@@ -84,29 +85,29 @@
  * REDUCE() is actually not used that frequently... maybe a C implementation
  * would suffice.
  */
-#define REDUCE(sum, tmp) __asm __volatile( \
+#define REDUCE(sum, tmp) __asm( \
 	"sll %2, 16, %1\n" \
 	"addcc %2, %1, %0\n" \
 	"srl %0, 16, %0\n" \
-	"addc %0, 0, %0" : "=r" (sum), "=r" (tmp) : "0" (sum))
+	"addc %0, 0, %0" : "=r" (sum), "=&r" (tmp) : "0" (sum) : "cc")
 
 /*
- * Note that some of these macros depend on the flags being preserved between
- * calls, so they should not be intermixed with other C statements.
+ * Note that some of these macros depend on the flags being preserved
+ * between calls, thus they have to be used within a single __asm().
  */
-#define LD64_ADD32(sum, tmp, addr, n, mod) __asm __volatile( \
+#define LD64_ADD32(n, mod) \
 	"ldx [%3 + " #n "], %1\n" \
 	"add" #mod " %2, %1, %0\n" \
 	"srlx %1, 32, %1\n" \
-	"addccc %0, %1, %0" : "=r" (sum), "=r" (tmp) : "0" (sum), "r" (addr))
+	"addccc %0, %1, %0\n"
 
-#define LD32_ADD32(sum, tmp, addr, n, mod) __asm __volatile( \
+#define LD32_ADD32(n, mod) \
 	"lduw [%3 + " #n "], %1\n" \
-	"add" #mod " %2, %1, %0\n" \
-	: "=r" (sum), "=r" (tmp) : "0" (sum), "r" (addr))
+	"add" #mod " %2, %1, %0\n"
 
-#define MOP(sum) __asm __volatile( \
-	"addc %1, 0, %0" : "=r" (sum) : "0" (sum))
+#define MOP(sum, tmp, addr) \
+	"addc %2, 0, %0" \
+	: "=r" (sum), "=&r" (tmp) : "0" (sum), "r" (addr) : "cc"
 
 u_short
 in_cksum_skip(struct mbuf *m, int len, int skip)
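As the comment in the hunk above suggests, REDUCE() is not used often
enough for the asm to matter much; a plain C fold along these lines
(a hypothetical sketch, not part of the commit) computes the same value
as the new asm, including the end-around carry:

	static __inline u_int
	reduce_c(u_int sum)
	{

		/* Fold the upper 16 bits into the lower 16 bits. */
		sum = (sum >> 16) + (sum & 0xffff);
		/* Fold the possible carry out of the first fold. */
		sum += sum >> 16;
		return (sum & 0xffff);
	}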
@@ -169,8 +170,10 @@ skip_start:
 			mlen -= 2;
 		}
 		if (((u_long)w & 4) != 0 && mlen >= 4) {
-			LD32_ADD32(sum, tmp, w, 0, cc);
-			MOP(sum);
+			__asm(
+			    LD32_ADD32(0, cc)
+			    MOP(sum, tmp, w)
+			);
 			w += 2;
 			mlen -= 4;
 		}
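Since the rewritten macros now expand to bare string literals plus the
operand lists supplied by MOP(), the merged form relies on C string
literal concatenation. For reference, the __asm() in the hunk above
preprocesses into a single statement roughly like this (reconstructed
by hand, not compiler output):

	__asm(
	    "lduw [%3 + 0], %1\n"
	    "addcc %2, %1, %0\n"
	    "addc %2, 0, %0"
	    : "=r" (sum), "=&r" (tmp)
	    : "0" (sum), "r" (w)
	    : "cc"
	);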
@@ -181,36 +184,44 @@ skip_start:
 		 * branches &c small.
 		 */
 		for (; mlen >= 64; mlen -= 64) {
-			LD64_ADD32(sum, tmp, w, 0, cc);
-			LD64_ADD32(sum, tmp, w, 8, ccc);
-			LD64_ADD32(sum, tmp, w, 16, ccc);
-			LD64_ADD32(sum, tmp, w, 24, ccc);
-			LD64_ADD32(sum, tmp, w, 32, ccc);
-			LD64_ADD32(sum, tmp, w, 40, ccc);
-			LD64_ADD32(sum, tmp, w, 48, ccc);
-			LD64_ADD32(sum, tmp, w, 56, ccc);
-			MOP(sum);
+			__asm(
+			    LD64_ADD32(0, cc)
+			    LD64_ADD32(8, ccc)
+			    LD64_ADD32(16, ccc)
+			    LD64_ADD32(24, ccc)
+			    LD64_ADD32(32, ccc)
+			    LD64_ADD32(40, ccc)
+			    LD64_ADD32(48, ccc)
+			    LD64_ADD32(56, ccc)
+			    MOP(sum, tmp, w)
+			);
 			w += 32;
 		}
 		if (mlen >= 32) {
-			LD64_ADD32(sum, tmp, w, 0, cc);
-			LD64_ADD32(sum, tmp, w, 8, ccc);
-			LD64_ADD32(sum, tmp, w, 16, ccc);
-			LD64_ADD32(sum, tmp, w, 24, ccc);
-			MOP(sum);
+			__asm(
+			    LD64_ADD32(0, cc)
+			    LD64_ADD32(8, ccc)
+			    LD64_ADD32(16, ccc)
+			    LD64_ADD32(24, ccc)
+			    MOP(sum, tmp, w)
+			);
 			w += 16;
 			mlen -= 32;
 		}
 		if (mlen >= 16) {
-			LD64_ADD32(sum, tmp, w, 0, cc);
-			LD64_ADD32(sum, tmp, w, 8, ccc);
-			MOP(sum);
+			__asm(
+			    LD64_ADD32(0, cc)
+			    LD64_ADD32(8, ccc)
+			    MOP(sum, tmp, w)
+			);
 			w += 8;
 			mlen -= 16;
 		}
 		if (mlen >= 8) {
-			LD64_ADD32(sum, tmp, w, 0, cc);
-			MOP(sum);
+			__asm(
+			    LD64_ADD32(0, cc)
+			    MOP(sum, tmp, w)
+			);
 			w += 4;
 			mlen -= 8;
 		}
@@ -229,7 +240,7 @@ skip_start:
 		} else if (mlen == -1) {
 			/*
 			 * This mbuf has odd number of bytes.
-			 * There could be a word split betwen
+			 * There could be a word split between
 			 * this mbuf and the next mbuf.
 			 * Save the last byte (to prepend to next mbuf).
 			 */
@@ -240,8 +251,10 @@ skip_start:
 	if (len)
 		printf("%s: out of data by %d\n", __func__, len);
 	if (mlen == -1) {
-		/* The last mbuf has odd # of bytes. Follow the
-		   standard (the odd byte is shifted left by 8 bits) */
+		/*
+		 * The last mbuf has odd # of bytes. Follow the
+		 * standard (the odd byte is shifted left by 8 bits).
+		 */
 		sum += su & 0xff00;
 	}
 	REDUCE(sum, tmp);
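For orientation, the overall algorithm the asm accelerates is the
standard Internet checksum (RFC 1071). A from-scratch C reference over
a flat buffer (a sketch with a hypothetical name, not the committed
mbuf-walking code) looks like this:

	#include <sys/types.h>

	static u_short
	in_cksum_ref(const u_char *buf, int len)
	{
		u_long sum = 0;

		/* Sum the data as big-endian 16-bit words. */
		for (; len > 1; len -= 2, buf += 2)
			sum += (buf[0] << 8) | buf[1];
		/*
		 * An odd trailing byte forms the high-order half of a
		 * final word, matching the "su & 0xff00" case above.
		 */
		if (len == 1)
			sum += buf[0] << 8;
		/* Fold all carries back in (end-around carry). */
		while (sum > 0xffff)
			sum = (sum & 0xffff) + (sum >> 16);
		return (~sum & 0xffff);
	}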