freebsd-skq/contrib/gcc/config/i386/tmmintrin.h
mm 36f9eb3065 Backport SSSE3 instruction set support to base gcc.
Enabled by default for -march=core2

Obtained from:	gcc 4.3 (rev. 117958, 121687, 121726, 123639; GPLv2)
MFC after:	2 weeks
2011-03-14 13:31:34 +00:00


/* Copyright (C) 2006 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.1.  */
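
/* SSSE3 intrinsics: horizontal add/subtract, multiply-and-add of unsigned
   by signed bytes, rounded high multiply, byte shuffle, sign, align-right
   and absolute value, each in a 128-bit (__m128i) and a 64-bit MMX (__m64)
   variant.  */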
#ifndef _TMMINTRIN_H_INCLUDED
#define _TMMINTRIN_H_INCLUDED
#ifdef __SSSE3__
#include <pmmintrin.h>

static __inline __m128i __attribute__((__always_inline__))
_mm_hadd_epi16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_phaddw128 ((__v8hi)__X, (__v8hi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_hadd_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_phaddd128 ((__v4si)__X, (__v4si)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_hadds_epi16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_phaddsw128 ((__v8hi)__X, (__v8hi)__Y);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_hadd_pi16 (__m64 __X, __m64 __Y)
{
  return (__m64) __builtin_ia32_phaddw ((__v4hi)__X, (__v4hi)__Y);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_hadd_pi32 (__m64 __X, __m64 __Y)
{
  return (__m64) __builtin_ia32_phaddd ((__v2si)__X, (__v2si)__Y);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_hadds_pi16 (__m64 __X, __m64 __Y)
{
  return (__m64) __builtin_ia32_phaddsw ((__v4hi)__X, (__v4hi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_hsub_epi16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_phsubw128 ((__v8hi)__X, (__v8hi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_hsub_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_phsubd128 ((__v4si)__X, (__v4si)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_hsubs_epi16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_phsubsw128 ((__v8hi)__X, (__v8hi)__Y);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_hsub_pi16 (__m64 __X, __m64 __Y)
{
  return (__m64) __builtin_ia32_phsubw ((__v4hi)__X, (__v4hi)__Y);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_hsub_pi32 (__m64 __X, __m64 __Y)
{
  return (__m64) __builtin_ia32_phsubd ((__v2si)__X, (__v2si)__Y);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_hsubs_pi16 (__m64 __X, __m64 __Y)
{
  return (__m64) __builtin_ia32_phsubsw ((__v4hi)__X, (__v4hi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_maddubs_epi16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaddubsw128 ((__v16qi)__X, (__v16qi)__Y);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_maddubs_pi16 (__m64 __X, __m64 __Y)
{
  return (__m64) __builtin_ia32_pmaddubsw ((__v8qi)__X, (__v8qi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_mulhrs_epi16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmulhrsw128 ((__v8hi)__X, (__v8hi)__Y);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_mulhrs_pi16 (__m64 __X, __m64 __Y)
{
  return (__m64) __builtin_ia32_pmulhrsw ((__v4hi)__X, (__v4hi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_shuffle_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pshufb128 ((__v16qi)__X, (__v16qi)__Y);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_shuffle_pi8 (__m64 __X, __m64 __Y)
{
  return (__m64) __builtin_ia32_pshufb ((__v8qi)__X, (__v8qi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sign_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_psignb128 ((__v16qi)__X, (__v16qi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sign_epi16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_psignw128 ((__v8hi)__X, (__v8hi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sign_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_psignd128 ((__v4si)__X, (__v4si)__Y);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_sign_pi8 (__m64 __X, __m64 __Y)
{
  return (__m64) __builtin_ia32_psignb ((__v8qi)__X, (__v8qi)__Y);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_sign_pi16 (__m64 __X, __m64 __Y)
{
  return (__m64) __builtin_ia32_psignw ((__v4hi)__X, (__v4hi)__Y);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_sign_pi32 (__m64 __X, __m64 __Y)
{
  return (__m64) __builtin_ia32_psignd ((__v2si)__X, (__v2si)__Y);
}
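
/* _mm_alignr_epi8 and _mm_alignr_pi8 are macros rather than inline
   functions: the palignr instruction encodes its byte-shift count as an
   immediate operand, so __N must be a compile-time constant.  */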
#define _mm_alignr_epi8(__X, __Y, __N) \
  ((__m128i)__builtin_ia32_palignr128 ((__v2di) __X, (__v2di) __Y, (__N) * 8))

#define _mm_alignr_pi8(__X, __Y, __N) \
  ((__m64)__builtin_ia32_palignr ((long long) (__X), (long long) (__Y), (__N) * 8))

static __inline __m128i __attribute__((__always_inline__))
_mm_abs_epi8 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pabsb128 ((__v16qi)__X);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_abs_epi16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pabsw128 ((__v8hi)__X);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_abs_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pabsd128 ((__v4si)__X);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_abs_pi8 (__m64 __X)
{
  return (__m64) __builtin_ia32_pabsb ((__v8qi)__X);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_abs_pi16 (__m64 __X)
{
  return (__m64) __builtin_ia32_pabsw ((__v4hi)__X);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_abs_pi32 (__m64 __X)
{
  return (__m64) __builtin_ia32_pabsd ((__v2si)__X);
}
#endif /* __SSSE3__ */
#endif /* _TMMINTRIN_H_INCLUDED */
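
As the commit message notes, these intrinsics are available once SSSE3 code
generation is enabled, e.g. with -mssse3 or -march=core2. The following is a
minimal usage sketch, not part of the header; the file name and variables are
illustrative only. It takes per-byte absolute values with _mm_abs_epi8 and
reverses the byte order with _mm_shuffle_epi8.

/* example.c -- build with: gcc -march=core2 example.c   (or -mssse3) */
#include <stdio.h>
#include <tmmintrin.h>

int
main (void)
{
  /* pshufb control mask: result byte i is source byte (mask[i] & 0x0f),
     or zero if the top bit of mask[i] is set.  This mask reverses the
     16 bytes of the source vector.  */
  __m128i rev = _mm_set_epi8 (0, 1, 2, 3, 4, 5, 6, 7,
                              8, 9, 10, 11, 12, 13, 14, 15);
  __m128i x = _mm_set_epi8 (-16, -15, -14, -13, -12, -11, -10, -9,
                            -8, -7, -6, -5, -4, -3, -2, -1);
  __m128i y = _mm_shuffle_epi8 (_mm_abs_epi8 (x), rev);
  unsigned char out[16];
  int i;

  _mm_storeu_si128 ((__m128i *) out, y);
  for (i = 0; i < 16; i++)
    printf ("%u ", out[i]);   /* prints 16 15 14 ... 1 */
  printf ("\n");
  return 0;
}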