e9b9739264
RTE_ARCH_xx flags are used to distinguish platform architectures. These flags can be used to pick different code paths for different architectures at compile time.

For Arm platforms, there are 3 flags in use: RTE_ARCH_ARM, RTE_ARCH_ARMv7 and RTE_ARCH_ARM64. RTE_ARCH_ARM64 is for 64-bit aarch64 platforms, and RTE_ARCH_ARM & RTE_ARCH_ARMv7 are for 32-bit platforms. RTE_ARCH_ARMv7 is for ARMv7 platforms, as its name suggests. The issue is that the meaning of RTE_ARCH_ARM is not clear enough, because the name carries no information about the platform word length.

To make the flag names clearer, the following naming scheme is proposed:

RTE_ARCH_ARM (all Arm platforms)
    |
    +----RTE_ARCH_32 (New. 32-bit platforms of all architectures)
    |        |
    |        +----RTE_ARCH_ARMv7 (ARMv7 platforms)
    |        |
    |        +----RTE_ARCH_ARMv8_AARCH32 (aarch32 state on aarch64 machine)
    |
    +----RTE_ARCH_64 (64-bit platforms of all architectures)
             |
             +----RTE_ARCH_ARM64 (64-bit Arm platforms)

RTE_ARCH_32 will be explicitly defined for 32-bit platforms. To fit into the new naming scheme, current usage of RTE_ARCH_ARM in the project is mapped to (RTE_ARCH_ARM && RTE_ARCH_32).

Matching flags for other architectures are:

RTE_ARCH_X86
    |
    +----RTE_ARCH_32
    |        |
    |        +----RTE_ARCH_I686
    |        |
    |        +----RTE_ARCH_X86_X32
    |
    +----RTE_ARCH_64
             |
             +----RTE_ARCH_X86_64

RTE_ARCH_PPC_64 ---- RTE_ARCH_64

Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Phil Yang <phil.yang@arm.com>
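As an illustration of how code would select paths under the proposed scheme, here is a minimal sketch. It is not part of the patch; the PLATFORM_NAME macro is purely hypothetical, and it assumes the build system defines RTE_ARCH_32/RTE_ARCH_64 alongside the per-architecture flags as described above.

/* Hypothetical dispatch sketch: word length comes from RTE_ARCH_32/RTE_ARCH_64,
 * the architecture family from RTE_ARCH_ARM, RTE_ARCH_X86, RTE_ARCH_PPC_64.
 */
#if defined(RTE_ARCH_ARM) && defined(RTE_ARCH_32)
#define PLATFORM_NAME "32-bit Arm"	/* the mapping of today's bare RTE_ARCH_ARM check */
#elif defined(RTE_ARCH_ARM64)
#define PLATFORM_NAME "64-bit Arm"
#elif defined(RTE_ARCH_X86) && defined(RTE_ARCH_32)
#define PLATFORM_NAME "32-bit x86"	/* RTE_ARCH_I686 or RTE_ARCH_X86_X32 */
#elif defined(RTE_ARCH_X86_64)
#define PLATFORM_NAME "64-bit x86"
#elif defined(RTE_ARCH_PPC_64)
#define PLATFORM_NAME "64-bit PowerPC"	/* RTE_ARCH_64 is also defined */
#else
#define PLATFORM_NAME "unknown"
#endif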
55 lines
1.4 KiB
C
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015 Cavium, Inc
 */

#ifndef _TEST_XMMT_OPS_H_
#define _TEST_XMMT_OPS_H_

#include <rte_vect.h>

#if defined(RTE_ARCH_ARM)

/* vect_* abstraction implementation using NEON */

/* loads the xmm_t value from address p (does not need to be 16-byte aligned) */
#define vect_loadu_sil128(p) vld1q_s32((const int32_t *)p)

/* sets the 4 signed 32-bit integer values and returns the xmm_t variable */
static __rte_always_inline xmm_t
vect_set_epi32(int i3, int i2, int i1, int i0)
{
	int32_t data[4] = {i0, i1, i2, i3};

	return vld1q_s32(data);
}

#elif defined(RTE_ARCH_X86)

/* vect_* abstraction implementation using SSE */

/* loads the xmm_t value from address p (does not need to be 16-byte aligned) */
#define vect_loadu_sil128(p) _mm_loadu_si128(p)

/* sets the 4 signed 32-bit integer values and returns the xmm_t variable */
#define vect_set_epi32(i3, i2, i1, i0) _mm_set_epi32(i3, i2, i1, i0)

#elif defined(RTE_ARCH_PPC_64)

/* vect_* abstraction implementation using ALTIVEC */

/* loads the xmm_t value from address p (does not need to be 16-byte aligned) */
#define vect_loadu_sil128(p) vec_ld(0, p)

/* sets the 4 signed 32-bit integer values and returns the xmm_t variable */
static __rte_always_inline xmm_t
vect_set_epi32(int i3, int i2, int i1, int i0)
{
	xmm_t data = (xmm_t){i0, i1, i2, i3};

	return data;
}

#endif

#endif /* _TEST_XMMT_OPS_H_ */
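As a usage note (not part of the header), the sketch below shows how a test might call these helpers so the same line builds with NEON, SSE, or AltiVec. The wrapper function name and the values are hypothetical; the cast on the load is an assumption made so the SSE variant, which expects an xmm_t (__m128i) pointer, accepts the argument.

#include <stdint.h>
#include <rte_vect.h>
#include "test_xmmt_ops.h"

/* Hypothetical caller: the vect_* macros hide the per-architecture intrinsics,
 * so this function compiles unchanged on Arm, x86 and PPC64 builds.
 */
static xmm_t
example_vect_usage(void)
{
	int32_t buf[4] = {10, 11, 12, 13};

	/* Build a vector from four signed 32-bit values. */
	xmm_t built = vect_set_epi32(3, 2, 1, 0);

	/* Load from memory (per the comment above, no 16-byte alignment required). */
	xmm_t loaded = vect_loadu_sil128((xmm_t *)buf);

	(void)built;
	return loaded;
}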