net: implement CRC for ARM64 NEON
Added CRC compute APIs for arm64 utilizing the pmull capability.

Added the new file net_crc_neon.h to hold the arm64 pmull CRC implementation, and added wrappers in rte_vect.h for those NEON intrinsics which are not supported in GCC versions older than 7.

Verified the changes with the crc_autotest unit test case.

Signed-off-by: Ashwin Sekhar T K <ashwin.sekhar@caviumnetworks.com>
Acked-by: Jianbo Liu <jianbo.liu@linaro.org>
commit a566400e8b (parent 266451e419)
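For orientation, a minimal usage sketch of the new path through librte_net's public API (not part of the commit; `crc_neon_demo()` is a hypothetical helper). If PMULL is unavailable at runtime, rte_net_crc_set_alg() keeps the scalar handlers, so the sequence is safe on any arm64 machine:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_net_crc.h>

/* Compute an Ethernet CRC-32 over a small buffer, requesting the
 * NEON/PMULL implementation added by this commit. If PMULL is not
 * available, the library transparently stays on the scalar path. */
static void
crc_neon_demo(void)
{
        uint8_t buf[32];
        uint32_t crc;

        memset(buf, 0xa5, sizeof(buf));

        rte_net_crc_set_alg(RTE_NET_CRC_NEON);
        crc = rte_net_crc_calc(buf, sizeof(buf), RTE_NET_CRC32_ETH);
        printf("CRC32: 0x%08x\n", crc);
}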
MAINTAINERS
@@ -144,6 +144,7 @@ ARM v8
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
M: Jianbo Liu <jianbo.liu@linaro.org>
F: lib/librte_eal/common/include/arch/arm/*_64.h
F: lib/librte_net/net_crc_neon.h
F: lib/librte_acl/acl_run_neon.*
F: lib/librte_lpm/rte_lpm_neon.h
F: lib/librte_hash/rte*_arm64.h
lib/librte_eal/common/include/arch/arm/rte_vect.h
@@ -35,6 +35,7 @@

#include <stdint.h>
#include "generic/rte_vect.h"
#include "rte_debug.h"
#include "arm_neon.h"

#ifdef __cplusplus
@@ -78,6 +79,93 @@ vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
}
#endif

#if defined(RTE_ARCH_ARM64)
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70000)
/* NEON intrinsic vreinterpretq_u64_p128() is supported since GCC version 7 */
static inline uint64x2_t
vreinterpretq_u64_p128(poly128_t x)
{
        return (uint64x2_t)x;
}

/* NEON intrinsic vreinterpretq_p64_u64() is supported since GCC version 7 */
static inline poly64x2_t
vreinterpretq_p64_u64(uint64x2_t x)
{
        return (poly64x2_t)x;
}

/* NEON intrinsic vgetq_lane_p64() is supported since GCC version 7 */
static inline poly64_t
vgetq_lane_p64(poly64x2_t x, const int lane)
{
        RTE_ASSERT(lane >= 0 && lane <= 1);

        poly64_t *p = (poly64_t *)&x;

        return p[lane];
}
#endif
#endif

/*
 * If (0 <= index <= 15), call the ASIMD ext instruction on the
 * 128 bit regs v0 and v1 with the appropriate index.
 *
 * Otherwise return a zero vector.
 */
static inline uint8x16_t
vextract(uint8x16_t v0, uint8x16_t v1, const int index)
{
        /* vextq_u8() requires a compile-time constant index, so dispatch
         * through a switch to allow a runtime value. */
        switch (index) {
        case 0: return vextq_u8(v0, v1, 0);
        case 1: return vextq_u8(v0, v1, 1);
        case 2: return vextq_u8(v0, v1, 2);
        case 3: return vextq_u8(v0, v1, 3);
        case 4: return vextq_u8(v0, v1, 4);
        case 5: return vextq_u8(v0, v1, 5);
        case 6: return vextq_u8(v0, v1, 6);
        case 7: return vextq_u8(v0, v1, 7);
        case 8: return vextq_u8(v0, v1, 8);
        case 9: return vextq_u8(v0, v1, 9);
        case 10: return vextq_u8(v0, v1, 10);
        case 11: return vextq_u8(v0, v1, 11);
        case 12: return vextq_u8(v0, v1, 12);
        case 13: return vextq_u8(v0, v1, 13);
        case 14: return vextq_u8(v0, v1, 14);
        case 15: return vextq_u8(v0, v1, 15);
        }
        return vdupq_n_u8(0);
}

/**
 * Shifts a 128 bit register right by the specified number of bytes
 *
 * The shift parameter must be in the range 0 - 16
 */
static inline uint64x2_t
vshift_bytes_right(uint64x2_t reg, const unsigned int shift)
{
        return vreinterpretq_u64_u8(vextract(
                        vreinterpretq_u8_u64(reg),
                        vdupq_n_u8(0),
                        shift));
}

/**
 * Shifts a 128 bit register left by the specified number of bytes
 *
 * The shift parameter must be in the range 0 - 16
 */
static inline uint64x2_t
vshift_bytes_left(uint64x2_t reg, const unsigned int shift)
{
        return vreinterpretq_u64_u8(vextract(
                        vdupq_n_u8(0),
                        vreinterpretq_u8_u64(reg),
                        16 - shift));
}

#ifdef __cplusplus
}
#endif
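To pin down what the two wrappers above compute, here is a scalar model (assumed equivalent, not part of the commit; the `*_model` names are hypothetical): the 128-bit register is viewed as 16 little-endian bytes, and vacated positions are zero-filled. One quirk visible in the code above: `vshift_bytes_left(reg, 0)` maps to `vextract(..., 16)`, which falls through to the zero vector rather than returning `reg` unchanged; every caller in this patch passes a shift of at least 1, so this is harmless.

#include <stdint.h>
#include <string.h>

/* Scalar model of vshift_bytes_right(): out[i] = reg[i + shift],
 * zero once i + shift >= 16 (bytes move toward index 0). */
static void
shift_bytes_right_model(uint8_t out[16], const uint8_t reg[16],
        unsigned int shift)
{
        memset(out, 0, 16);
        if (shift <= 16)
                memcpy(out, reg + shift, 16 - shift);
}

/* Scalar model of vshift_bytes_left(): out[i + shift] = reg[i],
 * low bytes zero-filled (bytes move toward index 15). */
static void
shift_bytes_left_model(uint8_t out[16], const uint8_t reg[16],
        unsigned int shift)
{
        memset(out, 0, 16);
        if (shift >= 1 && shift <= 16)
                memcpy(out + shift, reg, 16 - shift);
}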
lib/librte_net/net_crc_neon.h (new file, 297 lines)
@@ -0,0 +1,297 @@
/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _NET_CRC_NEON_H_
#define _NET_CRC_NEON_H_
#include <string.h>	/* memset()/memcpy() used below */

#include <rte_branch_prediction.h>
#include <rte_net_crc.h>
#include <rte_vect.h>
#include <rte_cpuflags.h>

#ifdef __cplusplus
extern "C" {
#endif
/** PMULL CRC computation context structure */
struct crc_pmull_ctx {
        uint64x2_t rk1_rk2;
        uint64x2_t rk5_rk6;
        uint64x2_t rk7_rk8;
};

struct crc_pmull_ctx crc32_eth_pmull __rte_aligned(16);
struct crc_pmull_ctx crc16_ccitt_pmull __rte_aligned(16);
/**
 * @brief Performs one folding round
 *
 * Logically the function operates as follows (matching the code below,
 * which pairs the high half with RK1 and the low half with RK2):
 *	DATA = READ_NEXT_16BYTES();
 *	F1 = MSB8(FOLD)
 *	F2 = LSB8(FOLD)
 *	T1 = CLMUL(F1, RK1)
 *	T2 = CLMUL(F2, RK2)
 *	FOLD = XOR(T1, T2, DATA)
 *
 * @param data_block 16 byte data block
 * @param precomp precomputed rk1 and rk2 constants
 * @param fold running 16 byte folded data
 *
 * @return New 16 byte folded data
 */
static inline uint64x2_t
crcr32_folding_round(uint64x2_t data_block, uint64x2_t precomp,
        uint64x2_t fold)
{
        uint64x2_t tmp0 = vreinterpretq_u64_p128(vmull_p64(
                        vgetq_lane_p64(vreinterpretq_p64_u64(fold), 1),
                        vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 0)));

        uint64x2_t tmp1 = vreinterpretq_u64_p128(vmull_p64(
                        vgetq_lane_p64(vreinterpretq_p64_u64(fold), 0),
                        vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 1)));

        return veorq_u64(tmp1, veorq_u64(data_block, tmp0));
}
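For reference, the algebra behind this round per the standard carry-less multiply CRC construction (Gopal et al., Intel PCLMULQDQ white paper); the bit-reflected variants used here fold in mirror-image constants, so read this up to reflection details. Splitting the 128-bit state as $\mathrm{FOLD}(x) = H(x)\,x^{64} + L(x)$ over $\mathrm{GF}(2)[x]$:

\[
\mathrm{FOLD}(x)\,x^{128}
 = H(x)\,x^{192} + L(x)\,x^{128}
 \equiv H(x)\,rk_1 \oplus L(x)\,rk_2 \pmod{P(x)},
\]

with $rk_1 = x^{192} \bmod P(x)$ and $rk_2 = x^{128} \bmod P(x)$. Hence $\mathrm{FOLD}' = \mathrm{CLMUL}(H, rk_1) \oplus \mathrm{CLMUL}(L, rk_2) \oplus \mathrm{DATA}$ absorbs 16 new bytes while remaining congruent, modulo $P(x)$, to the CRC state of everything consumed so far.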
/**
 * Performs reduction from 128 bits to 64 bits
 *
 * @param data128 128 bits data to be reduced
 * @param precomp rk5 and rk6 precomputed constants
 *
 * @return data reduced to 64 bits
 */
static inline uint64x2_t
crcr32_reduce_128_to_64(uint64x2_t data128,
        uint64x2_t precomp)
{
        uint64x2_t tmp0, tmp1, tmp2;

        /* 64b fold */
        tmp0 = vreinterpretq_u64_p128(vmull_p64(
                vgetq_lane_p64(vreinterpretq_p64_u64(data128), 0),
                vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 0)));
        tmp1 = vshift_bytes_right(data128, 8);
        tmp0 = veorq_u64(tmp0, tmp1);

        /* 32b fold */
        tmp2 = vshift_bytes_left(tmp0, 4);
        tmp1 = vreinterpretq_u64_p128(vmull_p64(
                vgetq_lane_p64(vreinterpretq_p64_u64(tmp2), 0),
                vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 1)));

        return veorq_u64(tmp1, tmp0);
}
/**
 * Performs Barrett's reduction from 64 bits to 32 bits
 *
 * @param data64 64 bits data to be reduced
 * @param precomp rk7 and rk8 precomputed constants
 *
 * @return data reduced to 32 bits
 */
static inline uint32_t
crcr32_reduce_64_to_32(uint64x2_t data64,
        uint64x2_t precomp)
{
        static uint32_t mask1[4] __rte_aligned(16) = {
                0xffffffff, 0xffffffff, 0x00000000, 0x00000000
        };
        static uint32_t mask2[4] __rte_aligned(16) = {
                0x00000000, 0xffffffff, 0xffffffff, 0xffffffff
        };
        uint64x2_t tmp0, tmp1, tmp2;

        tmp0 = vandq_u64(data64, vld1q_u64((uint64_t *)mask2));

        tmp1 = vreinterpretq_u64_p128(vmull_p64(
                vgetq_lane_p64(vreinterpretq_p64_u64(tmp0), 0),
                vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 0)));
        tmp1 = veorq_u64(tmp1, tmp0);
        tmp1 = vandq_u64(tmp1, vld1q_u64((uint64_t *)mask1));

        tmp2 = vreinterpretq_u64_p128(vmull_p64(
                vgetq_lane_p64(vreinterpretq_p64_u64(tmp1), 0),
                vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 1)));
        tmp2 = veorq_u64(tmp2, tmp1);
        tmp2 = veorq_u64(tmp2, tmp0);

        return vgetq_lane_u32(vreinterpretq_u32_u64(tmp2), 2);
}
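The routine above is Barrett's reduction as used in the same white-paper construction; as a sketch, with $R$ the 64-bit folded value:

\[
T_1 = \lfloor R / x^{32} \rfloor \cdot rk_7,\qquad
T_2 = \lfloor T_1 / x^{32} \rfloor \cdot rk_8,\qquad
\mathrm{CRC} = (R \oplus T_2) \bmod x^{32},
\]

where $rk_7 = \lfloor x^{64} / P(x) \rfloor$ and $rk_8 = P(x)$, all products carry-less. Consistent with this, the rk8 values loaded in rte_net_crc_neon_init() below are the polynomials themselves in bit-reflected form: 0x1db710641 for Ethernet CRC-32 (0x104c11db7) and 0x10811 for CRC-16-CCITT (0x11021).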
static inline uint32_t
crc32_eth_calc_pmull(
        const uint8_t *data,
        uint32_t data_len,
        uint32_t crc,
        const struct crc_pmull_ctx *params)
{
        uint64x2_t temp, fold, k;
        uint32_t n;

        /* Get CRC init value */
        temp = vreinterpretq_u64_u32(vsetq_lane_u32(crc, vmovq_n_u32(0), 0));

        /*
         * Fold all data into a single 16 byte data block.
         * Assumes fold holds the first 16 bytes of data.
         */
        if (unlikely(data_len < 32)) {
                if (unlikely(data_len == 16)) {
                        /* 16 bytes */
                        fold = vld1q_u64((const uint64_t *)data);
                        fold = veorq_u64(fold, temp);
                        goto reduction_128_64;
                }

                if (unlikely(data_len < 16)) {
                        /* 0 to 15 bytes */
                        uint8_t buffer[16] __rte_aligned(16);

                        memset(buffer, 0, sizeof(buffer));
                        memcpy(buffer, data, data_len);

                        fold = vld1q_u64((uint64_t *)buffer);
                        fold = veorq_u64(fold, temp);
                        if (unlikely(data_len < 4)) {
                                fold = vshift_bytes_left(fold, 8 - data_len);
                                goto barret_reduction;
                        }
                        fold = vshift_bytes_left(fold, 16 - data_len);
                        goto reduction_128_64;
                }
                /* 17 to 31 bytes */
                fold = vld1q_u64((const uint64_t *)data);
                fold = veorq_u64(fold, temp);
                n = 16;
                k = params->rk1_rk2;
                goto partial_bytes;
        }

        /* At least 32 bytes in the buffer */
        /* Apply CRC initial value */
        fold = vld1q_u64((const uint64_t *)data);
        fold = veorq_u64(fold, temp);

        /* Main folding loop - the last 16 bytes are processed separately */
        k = params->rk1_rk2;
        for (n = 16; (n + 16) <= data_len; n += 16) {
                temp = vld1q_u64((const uint64_t *)&data[n]);
                fold = crcr32_folding_round(temp, k, fold);
        }

partial_bytes:
        if (likely(n < data_len)) {
                uint64x2_t last16, a, b, mask;
                uint32_t rem = data_len & 15;

                /*
                 * Re-align the running fold with the unaligned 1-15 byte
                 * tail so one final folding step covers it: "a" keeps the
                 * bytes folded out early, "b" merges the rest of the fold
                 * with the masked tail of the data.
                 */
                last16 = vld1q_u64((const uint64_t *)&data[data_len - 16]);
                a = vshift_bytes_left(fold, 16 - rem);
                b = vshift_bytes_right(fold, rem);
                mask = vshift_bytes_left(vdupq_n_u64(-1), 16 - rem);
                b = vorrq_u64(b, vandq_u64(mask, last16));

                /* k = rk1 & rk2 */
                temp = vreinterpretq_u64_p128(vmull_p64(
                        vgetq_lane_p64(vreinterpretq_p64_u64(a), 1),
                        vgetq_lane_p64(vreinterpretq_p64_u64(k), 0)));
                fold = vreinterpretq_u64_p128(vmull_p64(
                        vgetq_lane_p64(vreinterpretq_p64_u64(a), 0),
                        vgetq_lane_p64(vreinterpretq_p64_u64(k), 1)));
                fold = veorq_u64(fold, temp);
                fold = veorq_u64(fold, b);
        }

        /* Reduction 128 -> 32. Assumes fold holds the 128 bit folded data */
reduction_128_64:
        k = params->rk5_rk6;
        fold = crcr32_reduce_128_to_64(fold, k);

barret_reduction:
        k = params->rk7_rk8;
        n = crcr32_reduce_64_to_32(fold, k);

        return n;
}
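As an independent cross-check of this routine (not part of the commit), a bitwise reflected CRC-32 reference; for any buffer it must agree with rte_crc32_eth_neon_handler() below:

#include <stdint.h>
#include <stddef.h>

/* Plain bitwise reflected CRC-32 (Ethernet; 0xedb88320 is the reflected
 * polynomial). Slow, but an obviously correct reference for the PMULL path. */
static uint32_t
crc32_eth_bitwise(const uint8_t *data, size_t data_len)
{
        uint32_t crc = 0xffffffff;
        size_t i;
        int bit;

        for (i = 0; i < data_len; i++) {
                crc ^= data[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
        }
        return ~crc;
}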
static inline void
rte_net_crc_neon_init(void)
{
        /* Initialize CRC16 data (0x10811 is the bit-reflected
         * CCITT polynomial 0x11021) */
        uint64_t ccitt_k1_k2[2] = {0x189aeLLU, 0x8e10LLU};
        uint64_t ccitt_k5_k6[2] = {0x189aeLLU, 0x114aaLLU};
        uint64_t ccitt_k7_k8[2] = {0x11c581910LLU, 0x10811LLU};

        /* Initialize CRC32 data (0x1db710641 is the bit-reflected
         * Ethernet polynomial 0x104c11db7) */
        uint64_t eth_k1_k2[2] = {0xccaa009eLLU, 0x1751997d0LLU};
        uint64_t eth_k5_k6[2] = {0xccaa009eLLU, 0x163cd6124LLU};
        uint64_t eth_k7_k8[2] = {0x1f7011640LLU, 0x1db710641LLU};

        /* Save the CRC16 params in the context structure */
        crc16_ccitt_pmull.rk1_rk2 = vld1q_u64(ccitt_k1_k2);
        crc16_ccitt_pmull.rk5_rk6 = vld1q_u64(ccitt_k5_k6);
        crc16_ccitt_pmull.rk7_rk8 = vld1q_u64(ccitt_k7_k8);

        /* Save the CRC32 params in the context structure */
        crc32_eth_pmull.rk1_rk2 = vld1q_u64(eth_k1_k2);
        crc32_eth_pmull.rk5_rk6 = vld1q_u64(eth_k5_k6);
        crc32_eth_pmull.rk7_rk8 = vld1q_u64(eth_k7_k8);
}
static inline uint32_t
rte_crc16_ccitt_neon_handler(const uint8_t *data,
        uint32_t data_len)
{
        return (uint16_t)~crc32_eth_calc_pmull(data,
                data_len,
                0xffff,
                &crc16_ccitt_pmull);
}

static inline uint32_t
rte_crc32_eth_neon_handler(const uint8_t *data,
        uint32_t data_len)
{
        return ~crc32_eth_calc_pmull(data,
                data_len,
                0xffffffffUL,
                &crc32_eth_pmull);
}

#ifdef __cplusplus
}
#endif

#endif /* _NET_CRC_NEON_H_ */
lib/librte_net/rte_net_crc.c
(Shown post-change. In rte_net_crc_set_alg(), the previously unconditional `case RTE_NET_CRC_SSE42:` label and its `#else` branch setting `alg = RTE_NET_CRC_SCALAR` are removed in favor of the conditional structure below.)
@@ -41,10 +41,14 @@

#if defined(RTE_ARCH_X86_64) && defined(RTE_MACHINE_CPUFLAG_PCLMULQDQ)
#define X86_64_SSE42_PCLMULQDQ 1
#elif defined(RTE_ARCH_ARM64) && defined(RTE_MACHINE_CPUFLAG_PMULL)
#define ARM64_NEON_PMULL 1
#endif

#ifdef X86_64_SSE42_PCLMULQDQ
#include <net_crc_sse.h>
#elif defined ARM64_NEON_PMULL
#include <net_crc_neon.h>
#endif

/* crc tables */
@@ -72,6 +76,11 @@ static rte_net_crc_handler handlers_sse42[] = {
        [RTE_NET_CRC16_CCITT] = rte_crc16_ccitt_sse42_handler,
        [RTE_NET_CRC32_ETH] = rte_crc32_eth_sse42_handler,
};
#elif defined ARM64_NEON_PMULL
static rte_net_crc_handler handlers_neon[] = {
        [RTE_NET_CRC16_CCITT] = rte_crc16_ccitt_neon_handler,
        [RTE_NET_CRC32_ETH] = rte_crc32_eth_neon_handler,
};
#endif

/**
@@ -160,14 +169,21 @@ void
rte_net_crc_set_alg(enum rte_net_crc_alg alg)
{
        switch (alg) {
#ifdef X86_64_SSE42_PCLMULQDQ
        case RTE_NET_CRC_SSE42:
                handlers = handlers_sse42;
                break;
#elif defined ARM64_NEON_PMULL
                /* fall-through */
        case RTE_NET_CRC_NEON:
                if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_PMULL)) {
                        handlers = handlers_neon;
                        break;
                }
#endif
                /* fall-through */
        case RTE_NET_CRC_SCALAR:
                /* fall-through */
        default:
                handlers = handlers_scalar;
                break;
@@ -197,8 +213,13 @@ rte_net_crc_init(void)
        rte_net_crc_scalar_init();

#ifdef X86_64_SSE42_PCLMULQDQ
        alg = RTE_NET_CRC_SSE42;
        rte_net_crc_sse42_init();
#elif defined ARM64_NEON_PMULL
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_PMULL)) {
                alg = RTE_NET_CRC_NEON;
                rte_net_crc_neon_init();
        }
#endif

        rte_net_crc_set_alg(alg);
lib/librte_net/rte_net_crc.h
@@ -57,6 +57,7 @@ enum rte_net_crc_type {
enum rte_net_crc_alg {
        RTE_NET_CRC_SCALAR = 0,
        RTE_NET_CRC_SSE42,
        RTE_NET_CRC_NEON,
};

/**
@@ -68,6 +69,7 @@ enum rte_net_crc_alg {
 * This parameter is used to select the CRC implementation version.
 * - RTE_NET_CRC_SCALAR
 * - RTE_NET_CRC_SSE42 (Use 64-bit SSE4.2 intrinsic)
 * - RTE_NET_CRC_NEON (Use ARM NEON intrinsic)
 */
void
rte_net_crc_set_alg(enum rte_net_crc_alg alg);
test_crc.c
@@ -178,6 +178,15 @@ test_crc(void)
                return ret;
        }

        /* set CRC neon mode */
        rte_net_crc_set_alg(RTE_NET_CRC_NEON);

        ret = test_crc_calc();
        if (ret < 0) {
                printf("test crc (arm64 neon pmull): failed (%d)\n", ret);
                return ret;
        }

        return 0;
}