From 906afc503523f5d2fa000931faf398524b757757 Mon Sep 17 00:00:00 2001
From: marius
Date: Mon, 21 Jul 2008 16:59:22 +0000
Subject: [PATCH] MFC: r180298; r180299

- Fix spelling and style.
- Use __FBSDID.
- Merge macros which depend on the flags being preserved between calls into
  a single "__asm" statement, as GCC doesn't guarantee their consecutive
  output even when using consecutive "__asm __volatile" statements for them.
  Remove the otherwise unnecessary "__volatile". [1]
- The inline assembler instructions used here alter the condition codes, so
  add them to the clobber list accordingly.
- The inline assembler instructions used here write output operands before
  all input operands are consumed, so add the appropriate earlyclobber ("&")
  modifiers.

Pointed out by: bde [1]
---
 sys/sparc64/sparc64/in_cksum.c | 109 ++++++++++++++++++---------------
 1 file changed, 61 insertions(+), 48 deletions(-)

diff --git a/sys/sparc64/sparc64/in_cksum.c b/sys/sparc64/sparc64/in_cksum.c
index 8239b110e772..47e69cdc6086 100644
--- a/sys/sparc64/sparc64/in_cksum.c
+++ b/sys/sparc64/sparc64/in_cksum.c
@@ -53,10 +53,11 @@
 * from tahoe: in_cksum.c 1.2 86/01/05
 * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
 * from: FreeBSD: src/sys/i386/i386/in_cksum.c,v 1.22 2000/11/25
- *
- * $FreeBSD$
 */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
 #include <sys/param.h>
 #include <sys/mbuf.h>
 #include <sys/systm.h>

@@ -73,10 +74,10 @@
 * This routine is very heavily used in the network
 * code and should be modified for each CPU to be as fast as possible.
 *
- * This implementation is a sparc64 version. Most code was taken over and
- * adapted from the i386. Some optimizations were changed to achieve (hopefully)
- * better performance.
- * This uses 64 bit loads, but 32 bit additions due to the lack of a 64-bit
+ * This implementation is a sparc64 version. Most code was taken over
+ * and adapted from the i386. Some optimizations were changed to achieve
+ * (hopefully) better performance.
+ * This uses 64-bit loads, but 32-bit additions due to the lack of a 64-bit
 * add-with-carry operation.
 */

@@ -84,29 +85,29 @@
 * REDUCE() is actually not used that frequently... maybe a C implementation
 * would suffice.
 */
-#define REDUCE(sum, tmp) __asm __volatile( \
- "sll %2, 16, %1\n" \
- "addcc %2, %1, %0\n" \
- "srl %0, 16, %0\n" \
- "addc %0, 0, %0" : "=r" (sum), "=r" (tmp) : "0" (sum))
+#define REDUCE(sum, tmp) __asm( \
+ "sll %2, 16, %1\n" \
+ "addcc %2, %1, %0\n" \
+ "srl %0, 16, %0\n" \
+ "addc %0, 0, %0" : "=r" (sum), "=&r" (tmp) : "0" (sum) : "cc")

 /*
- * Note that some of these macros depend on the flags being preserved between
- * calls, so they should not be intermixed with other C statements.
+ * Note that some of these macros depend on the flags being preserved
+ * between calls, thus they have to be used within a single __asm().
*/ -#define LD64_ADD32(sum, tmp, addr, n, mod) __asm __volatile( \ - "ldx [%3 + " #n "], %1\n" \ - "add" #mod " %2, %1, %0\n" \ - "srlx %1, 32, %1\n" \ - "addccc %0, %1, %0" : "=r" (sum), "=r" (tmp) : "0" (sum), "r" (addr)) +#define LD64_ADD32(n, mod) \ + "ldx [%3 + " #n "], %1\n" \ + "add" #mod " %2, %1, %0\n" \ + "srlx %1, 32, %1\n" \ + "addccc %0, %1, %0\n" -#define LD32_ADD32(sum, tmp, addr, n, mod) __asm __volatile( \ - "lduw [%3 + " #n "], %1\n" \ - "add" #mod " %2, %1, %0\n" \ - : "=r" (sum), "=r" (tmp) : "0" (sum), "r" (addr)) +#define LD32_ADD32(n, mod) \ + "lduw [%3 + " #n "], %1\n" \ + "add" #mod " %2, %1, %0\n" -#define MOP(sum) __asm __volatile( \ - "addc %1, 0, %0" : "=r" (sum) : "0" (sum)) +#define MOP(sum, tmp, addr) \ + "addc %2, 0, %0" \ + : "=r" (sum), "=&r" (tmp) : "0" (sum), "r" (addr) : "cc" u_short in_cksum_skip(struct mbuf *m, int len, int skip) @@ -169,8 +170,10 @@ skip_start: mlen -= 2; } if (((u_long)w & 4) != 0 && mlen >= 4) { - LD32_ADD32(sum, tmp, w, 0, cc); - MOP(sum); + __asm( + LD32_ADD32(0, cc) + MOP(sum, tmp, w) + ); w += 2; mlen -= 4; } @@ -181,36 +184,44 @@ skip_start: * branches &c small. */ for (; mlen >= 64; mlen -= 64) { - LD64_ADD32(sum, tmp, w, 0, cc); - LD64_ADD32(sum, tmp, w, 8, ccc); - LD64_ADD32(sum, tmp, w, 16, ccc); - LD64_ADD32(sum, tmp, w, 24, ccc); - LD64_ADD32(sum, tmp, w, 32, ccc); - LD64_ADD32(sum, tmp, w, 40, ccc); - LD64_ADD32(sum, tmp, w, 48, ccc); - LD64_ADD32(sum, tmp, w, 56, ccc); - MOP(sum); + __asm( + LD64_ADD32(0, cc) + LD64_ADD32(8, ccc) + LD64_ADD32(16, ccc) + LD64_ADD32(24, ccc) + LD64_ADD32(32, ccc) + LD64_ADD32(40, ccc) + LD64_ADD32(48, ccc) + LD64_ADD32(56, ccc) + MOP(sum, tmp, w) + ); w += 32; } if (mlen >= 32) { - LD64_ADD32(sum, tmp, w, 0, cc); - LD64_ADD32(sum, tmp, w, 8, ccc); - LD64_ADD32(sum, tmp, w, 16, ccc); - LD64_ADD32(sum, tmp, w, 24, ccc); - MOP(sum); + __asm( + LD64_ADD32(0, cc) + LD64_ADD32(8, ccc) + LD64_ADD32(16, ccc) + LD64_ADD32(24, ccc) + MOP(sum, tmp, w) + ); w += 16; mlen -= 32; } if (mlen >= 16) { - LD64_ADD32(sum, tmp, w, 0, cc); - LD64_ADD32(sum, tmp, w, 8, ccc); - MOP(sum); + __asm( + LD64_ADD32(0, cc) + LD64_ADD32(8, ccc) + MOP(sum, tmp, w) + ); w += 8; mlen -= 16; } if (mlen >= 8) { - LD64_ADD32(sum, tmp, w, 0, cc); - MOP(sum); + __asm( + LD64_ADD32(0, cc) + MOP(sum, tmp, w) + ); w += 4; mlen -= 8; } @@ -229,7 +240,7 @@ skip_start: } else if (mlen == -1) { /* * This mbuf has odd number of bytes. - * There could be a word split betwen + * There could be a word split between * this mbuf and the next mbuf. * Save the last byte (to prepend to next mbuf). */ @@ -240,8 +251,10 @@ skip_start: if (len) printf("%s: out of data by %d\n", __func__, len); if (mlen == -1) { - /* The last mbuf has odd # of bytes. Follow the - standard (the odd byte is shifted left by 8 bits) */ + /* + * The last mbuf has odd # of bytes. Follow the + * standard (the odd byte is shifted left by 8 bits). + */ sum += su & 0xff00; } REDUCE(sum, tmp);
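
For reference only (not part of this change): what an unrolled run of the new
LD64_ADD32()/MOP() fragments accumulates can be sketched in portable C, with a
64-bit accumulator standing in for the addcc/addccc/addc carry chain of the
sparc64 assembly. The helper name sum32_halves is purely illustrative.

#include <stddef.h>
#include <stdint.h>

/*
 * Accumulate the 32-bit halves of 64-bit words into a running sum.
 * A 64-bit accumulator stands in for the addcc/addccc carry chain;
 * the trailing fold plays the role of the final addc done by MOP().
 */
static uint32_t
sum32_halves(const uint64_t *w, size_t nwords, uint32_t sum)
{
	uint64_t acc = sum;
	size_t i;

	for (i = 0; i < nwords; i++) {
		acc += (uint32_t)w[i];		/* low 32 bits */
		acc += (uint32_t)(w[i] >> 32);	/* high 32 bits */
	}
	while (acc >> 32)			/* end-around carry */
		acc = (acc & 0xffffffff) + (acc >> 32);
	return ((uint32_t)acc);
}

The running 32-bit value produced this way is what REDUCE() later folds down
to 16 bits.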
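
Likewise, the final fold that REDUCE() performs with sll/addcc/srl/addc has a
straightforward C form, as the comment above that macro already hints. A
minimal sketch, with the hypothetical name reduce32:

#include <stdint.h>

/*
 * Fold a 32-bit one's-complement partial sum into 16 bits and add the
 * carry of that fold back in, which is what the sll/addcc/srl/addc
 * sequence in REDUCE() computes.
 */
static uint32_t
reduce32(uint32_t sum)
{

	sum = (sum & 0xffff) + (sum >> 16);	/* fold upper half into lower */
	sum += sum >> 16;			/* add back the carry, if any */
	return (sum & 0xffff);
}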